aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.mailmap1
-rw-r--r--CREDITS4
-rw-r--r--Documentation/ABI/testing/sysfs-block64
-rw-r--r--Documentation/ABI/testing/sysfs-ptp98
-rw-r--r--Documentation/IRQ-affinity.txt17
-rw-r--r--Documentation/blockdev/cciss.txt15
-rw-r--r--Documentation/cachetlb.txt2
-rw-r--r--Documentation/devicetree/bindings/net/fsl-tsec-phy.txt54
-rw-r--r--Documentation/filesystems/9p.txt29
-rw-r--r--Documentation/filesystems/proc.txt11
-rw-r--r--Documentation/kernel-parameters.txt3
-rw-r--r--Documentation/lockstat.txt2
-rw-r--r--Documentation/ptp/ptp.txt89
-rw-r--r--Documentation/ptp/testptp.c381
-rw-r--r--Documentation/ptp/testptp.mk33
-rw-r--r--Documentation/virtual/uml/UserModeLinux-HOWTO.txt10
-rw-r--r--Documentation/vm/locking2
-rw-r--r--MAINTAINERS24
-rw-r--r--Makefile17
-rw-r--r--arch/Kconfig3
-rw-r--r--arch/alpha/Kconfig4
-rw-r--r--arch/alpha/include/asm/gpio.h55
-rw-r--r--arch/alpha/include/asm/smp.h2
-rw-r--r--arch/alpha/kernel/process.c2
-rw-r--r--arch/alpha/kernel/setup.c2
-rw-r--r--arch/alpha/kernel/smp.c7
-rw-r--r--arch/alpha/kernel/sys_dp264.c2
-rw-r--r--arch/alpha/kernel/sys_titan.c13
-rw-r--r--arch/alpha/mm/init.c2
-rw-r--r--arch/alpha/mm/numa.c1
-rw-r--r--arch/arm/Kconfig.debug7
-rw-r--r--arch/arm/include/asm/smp.h6
-rw-r--r--arch/arm/include/asm/tlb.h53
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h78
-rw-r--r--arch/arm/mach-omap2/board-3430sdp.c4
-rw-r--r--arch/arm/mach-omap2/board-4430sdp.c11
-rw-r--r--arch/arm/mach-omap2/board-am3517evm.c4
-rw-r--r--arch/arm/mach-omap2/board-cm-t35.c4
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c4
-rw-r--r--arch/arm/mach-omap2/board-igep0020.c4
-rw-r--r--arch/arm/mach-omap2/board-omap3beagle.c4
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c4
-rw-r--r--arch/arm/mach-omap2/board-omap3pandora.c2
-rw-r--r--arch/arm/mach-omap2/board-omap3stalker.c4
-rw-r--r--arch/arm/mach-omap2/board-omap4panda.c4
-rw-r--r--arch/arm/mach-omap2/board-overo.c4
-rw-r--r--arch/arm/mach-omap2/board-rx51-video.c2
-rw-r--r--arch/arm/mach-omap2/board-zoom-display.c2
-rw-r--r--arch/arm/mach-omap2/display.c77
-rw-r--r--arch/arm/mach-omap2/include/mach/board-zoom.h2
-rw-r--r--arch/arm/mach-shmobile/Makefile5
-rw-r--r--arch/arm/mach-shmobile/board-ag5evm.c118
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c30
-rw-r--r--arch/arm/mach-shmobile/board-g4evm.c2
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c272
-rw-r--r--arch/arm/mach-shmobile/clock-sh7372.c21
-rw-r--r--arch/arm/mach-shmobile/clock-sh73a0.c19
-rw-r--r--arch/arm/mach-shmobile/cpuidle.c92
-rw-r--r--arch/arm/mach-shmobile/headsmp.S2
-rw-r--r--arch/arm/mach-shmobile/include/mach/common.h7
-rw-r--r--arch/arm/mach-shmobile/include/mach/head-ap4evb.txt3
-rw-r--r--arch/arm/mach-shmobile/include/mach/head-mackerel.txt3
-rw-r--r--arch/arm/mach-shmobile/include/mach/sh7372.h1
-rw-r--r--arch/arm/mach-shmobile/include/mach/sh73a0.h30
-rw-r--r--arch/arm/mach-shmobile/intc-sh7372.c46
-rw-r--r--arch/arm/mach-shmobile/pm-sh7372.c108
-rw-r--r--arch/arm/mach-shmobile/setup-sh7367.c223
-rw-r--r--arch/arm/mach-shmobile/setup-sh7372.c217
-rw-r--r--arch/arm/mach-shmobile/setup-sh7377.c239
-rw-r--r--arch/arm/mach-shmobile/setup-sh73a0.c244
-rw-r--r--arch/arm/mach-shmobile/sleep-sh7372.S260
-rw-r--r--arch/arm/mach-shmobile/smp-sh73a0.c9
-rw-r--r--arch/arm/mach-shmobile/suspend.c47
-rw-r--r--arch/arm/mach-ux500/Kconfig3
-rw-r--r--arch/arm/mach-ux500/Makefile4
-rw-r--r--arch/arm/mach-ux500/cpu-db5500.c2
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c7
-rw-r--r--arch/arm/mach-ux500/cpu.c7
-rw-r--r--arch/arm/mach-ux500/cpufreq.c211
-rw-r--r--arch/arm/mach-ux500/include/mach/db5500-regs.h20
-rw-r--r--arch/arm/mach-ux500/include/mach/db8500-regs.h37
-rw-r--r--arch/arm/mach-ux500/include/mach/hardware.h1
-rw-r--r--arch/arm/mach-ux500/include/mach/id.h20
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs-board-mop500.h5
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs-board-u5500.h21
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs-db5500.h27
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs-db8500.h54
-rw-r--r--arch/arm/mach-ux500/include/mach/irqs.h46
-rw-r--r--arch/arm/mach-ux500/include/mach/prcmu-defs.h30
-rw-r--r--arch/arm/mach-ux500/include/mach/prcmu.h28
-rw-r--r--arch/arm/mach-ux500/prcmu.c394
-rw-r--r--arch/arm/mm/init.c2
-rw-r--r--arch/arm/mm/mmu.c2
-rw-r--r--arch/avr32/mm/init.c2
-rw-r--r--arch/blackfin/Kconfig2
-rw-r--r--arch/blackfin/Kconfig.debug11
-rw-r--r--arch/blackfin/configs/BF527-EZKIT-V2_defconfig12
-rw-r--r--arch/blackfin/configs/BF527-EZKIT_defconfig14
-rw-r--r--arch/blackfin/configs/BF533-STAMP_defconfig2
-rw-r--r--arch/blackfin/configs/BF537-STAMP_defconfig2
-rw-r--r--arch/blackfin/include/asm/bfin-global.h10
-rw-r--r--arch/blackfin/include/asm/bfin_pfmon.h44
-rw-r--r--arch/blackfin/include/asm/bfin_sport.h4
-rw-r--r--arch/blackfin/include/asm/cacheflush.h23
-rw-r--r--arch/blackfin/include/asm/cpu.h3
-rw-r--r--arch/blackfin/include/asm/def_LPBlackfin.h12
-rw-r--r--arch/blackfin/include/asm/irq_handler.h25
-rw-r--r--arch/blackfin/include/asm/kgdb.h6
-rw-r--r--arch/blackfin/include/asm/perf_event.h1
-rw-r--r--arch/blackfin/include/asm/ptrace.h2
-rw-r--r--arch/blackfin/include/mach-common/irq.h57
-rw-r--r--arch/blackfin/kernel/Makefile3
-rw-r--r--arch/blackfin/kernel/bfin_dma_5xx.c5
-rw-r--r--arch/blackfin/kernel/bfin_gpio.c34
-rw-r--r--arch/blackfin/kernel/bfin_ksyms.c1
-rw-r--r--arch/blackfin/kernel/debug-mmrs.c1860
-rw-r--r--arch/blackfin/kernel/ipipe.c1
-rw-r--r--arch/blackfin/kernel/irqchip.c1
-rw-r--r--arch/blackfin/kernel/nmi.c8
-rw-r--r--arch/blackfin/kernel/perf_event.c498
-rw-r--r--arch/blackfin/kernel/process.c6
-rw-r--r--arch/blackfin/kernel/reboot.c65
-rw-r--r--arch/blackfin/kernel/setup.c54
-rw-r--r--arch/blackfin/kernel/vmlinux.lds.S8
-rw-r--r--arch/blackfin/mach-bf518/include/mach/anomaly.h4
-rw-r--r--arch/blackfin/mach-bf518/include/mach/cdefBF512.h16
-rw-r--r--arch/blackfin/mach-bf518/include/mach/defBF512.h8
-rw-r--r--arch/blackfin/mach-bf518/include/mach/irq.h262
-rw-r--r--arch/blackfin/mach-bf527/boards/ezkit.c74
-rw-r--r--arch/blackfin/mach-bf527/include/mach/anomaly.h8
-rw-r--r--arch/blackfin/mach-bf527/include/mach/cdefBF522.h16
-rw-r--r--arch/blackfin/mach-bf527/include/mach/defBF522.h8
-rw-r--r--arch/blackfin/mach-bf527/include/mach/irq.h266
-rw-r--r--arch/blackfin/mach-bf533/include/mach/anomaly.h11
-rw-r--r--arch/blackfin/mach-bf533/include/mach/irq.h168
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c106
-rw-r--r--arch/blackfin/mach-bf537/include/mach/anomaly.h10
-rw-r--r--arch/blackfin/mach-bf537/include/mach/irq.h365
-rw-r--r--arch/blackfin/mach-bf537/ints-priority.c163
-rw-r--r--arch/blackfin/mach-bf538/include/mach/anomaly.h9
-rw-r--r--arch/blackfin/mach-bf538/include/mach/irq.h89
-rw-r--r--arch/blackfin/mach-bf548/boards/ezkit.c116
-rw-r--r--arch/blackfin/mach-bf548/include/mach/anomaly.h8
-rw-r--r--arch/blackfin/mach-bf548/include/mach/irq.h89
-rw-r--r--arch/blackfin/mach-bf561/boards/ezkit.c10
-rw-r--r--arch/blackfin/mach-bf561/include/mach/anomaly.h15
-rw-r--r--arch/blackfin/mach-bf561/include/mach/irq.h505
-rw-r--r--arch/blackfin/mach-bf561/smp.c17
-rw-r--r--arch/blackfin/mach-common/dpmc.c7
-rw-r--r--arch/blackfin/mach-common/ints-priority.c476
-rw-r--r--arch/blackfin/mach-common/smp.c28
-rw-r--r--arch/blackfin/mm/sram-alloc.c43
-rw-r--r--arch/cris/arch-v32/kernel/irq.c4
-rw-r--r--arch/cris/arch-v32/kernel/smp.c33
-rw-r--r--arch/cris/mm/init.c2
-rw-r--r--arch/frv/mm/init.c2
-rw-r--r--arch/ia64/include/asm/tlb.h66
-rw-r--r--arch/ia64/mm/contig.c10
-rw-r--r--arch/ia64/mm/discontig.c10
-rw-r--r--arch/ia64/mm/init.c2
-rw-r--r--arch/m32r/Kconfig.debug9
-rw-r--r--arch/m32r/include/asm/smp.h2
-rw-r--r--arch/m32r/mm/discontig.c1
-rw-r--r--arch/m32r/mm/init.c2
-rw-r--r--arch/m68k/mm/init_mm.c2
-rw-r--r--arch/microblaze/mm/init.c2
-rw-r--r--arch/mips/Kconfig.debug9
-rw-r--r--arch/mips/mm/init.c2
-rw-r--r--arch/mn10300/kernel/irq.c16
-rw-r--r--arch/mn10300/kernel/smp.c75
-rw-r--r--arch/mn10300/mm/cache-smp.c8
-rw-r--r--arch/mn10300/mm/init.c2
-rw-r--r--arch/mn10300/mm/tlb-smp.c32
-rw-r--r--arch/parisc/include/asm/smp.h9
-rw-r--r--arch/parisc/mm/init.c4
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/Kconfig.debug21
-rw-r--r--arch/powerpc/boot/dts/mpc8313erdb.dts13
-rw-r--r--arch/powerpc/boot/dts/mpc8572ds.dts13
-rw-r--r--arch/powerpc/boot/dts/p2020ds.dts13
-rw-r--r--arch/powerpc/boot/dts/p2020rdb.dts13
-rw-r--r--arch/powerpc/include/asm/pgalloc.h21
-rw-r--r--arch/powerpc/include/asm/thread_info.h2
-rw-r--r--arch/powerpc/kernel/process.c23
-rw-r--r--arch/powerpc/mm/pgtable.c104
-rw-r--r--arch/powerpc/mm/tlb_hash32.c3
-rw-r--r--arch/powerpc/mm/tlb_hash64.c5
-rw-r--r--arch/powerpc/mm/tlb_nohash.c3
-rw-r--r--arch/s390/include/asm/tlb.h62
-rw-r--r--arch/s390/mm/pgtable.c1
-rw-r--r--arch/score/Kconfig.debug9
-rw-r--r--arch/score/mm/init.c2
-rw-r--r--arch/sh/Kconfig.debug9
-rw-r--r--arch/sh/include/asm/tlb.h28
-rw-r--r--arch/sh/mm/init.c1
-rw-r--r--arch/sparc/Kconfig.debug9
-rw-r--r--arch/sparc/include/asm/pgalloc_64.h3
-rw-r--r--arch/sparc/include/asm/pgtable_64.h15
-rw-r--r--arch/sparc/include/asm/tlb_64.h91
-rw-r--r--arch/sparc/include/asm/tlbflush_64.h12
-rw-r--r--arch/sparc/kernel/setup_32.c2
-rw-r--r--arch/sparc/mm/init_32.c4
-rw-r--r--arch/sparc/mm/tlb.c43
-rw-r--r--arch/sparc/mm/tsb.c15
-rw-r--r--arch/tile/Kconfig8
-rw-r--r--arch/tile/Kconfig.debug9
-rw-r--r--arch/tile/configs/tile_defconfig71
-rw-r--r--arch/tile/configs/tilegx_defconfig1833
-rw-r--r--arch/tile/configs/tilepro_defconfig1163
-rw-r--r--arch/tile/include/arch/chip_tilegx.h258
-rw-r--r--arch/tile/include/arch/icache.h11
-rw-r--r--arch/tile/include/arch/interrupts_64.h276
-rw-r--r--arch/tile/include/arch/spr_def.h13
-rw-r--r--arch/tile/include/arch/spr_def_64.h173
-rw-r--r--arch/tile/include/asm/atomic.h49
-rw-r--r--arch/tile/include/asm/atomic_32.h10
-rw-r--r--arch/tile/include/asm/atomic_64.h156
-rw-r--r--arch/tile/include/asm/backtrace.h82
-rw-r--r--arch/tile/include/asm/bitops.h1
-rw-r--r--arch/tile/include/asm/bitops_32.h1
-rw-r--r--arch/tile/include/asm/bitops_64.h105
-rw-r--r--arch/tile/include/asm/cacheflush.h18
-rw-r--r--arch/tile/include/asm/compat.h4
-rw-r--r--arch/tile/include/asm/dma-mapping.h3
-rw-r--r--arch/tile/include/asm/fb.h1
-rw-r--r--arch/tile/include/asm/io.h18
-rw-r--r--arch/tile/include/asm/irq.h2
-rw-r--r--arch/tile/include/asm/mmu_context.h4
-rw-r--r--arch/tile/include/asm/opcode-tile_32.h7
-rw-r--r--arch/tile/include/asm/opcode-tile_64.h1500
-rw-r--r--arch/tile/include/asm/opcode_constants_64.h1043
-rw-r--r--arch/tile/include/asm/page.h18
-rw-r--r--arch/tile/include/asm/parport.h1
-rw-r--r--arch/tile/include/asm/pci.h3
-rw-r--r--arch/tile/include/asm/pgtable_64.h175
-rw-r--r--arch/tile/include/asm/processor.h9
-rw-r--r--arch/tile/include/asm/serial.h1
-rw-r--r--arch/tile/include/asm/signal.h4
-rw-r--r--arch/tile/include/asm/spinlock_64.h161
-rw-r--r--arch/tile/include/asm/stat.h2
-rw-r--r--arch/tile/include/asm/swab.h6
-rw-r--r--arch/tile/include/asm/thread_info.h5
-rw-r--r--arch/tile/include/asm/topology.h75
-rw-r--r--arch/tile/include/asm/traps.h4
-rw-r--r--arch/tile/include/asm/unistd.h2
-rw-r--r--arch/tile/include/asm/vga.h (renamed from arch/tile/include/hv/pagesize.h)35
-rw-r--r--arch/tile/include/hv/hypervisor.h12
-rw-r--r--arch/tile/kernel/backtrace.c103
-rw-r--r--arch/tile/kernel/compat.c13
-rw-r--r--arch/tile/kernel/compat_signal.c4
-rw-r--r--arch/tile/kernel/futex_64.S55
-rw-r--r--arch/tile/kernel/hardwall.c6
-rw-r--r--arch/tile/kernel/head_64.S269
-rw-r--r--arch/tile/kernel/intvec_32.S175
-rw-r--r--arch/tile/kernel/intvec_64.S1231
-rw-r--r--arch/tile/kernel/module.c8
-rw-r--r--arch/tile/kernel/pci-dma.c2
-rw-r--r--arch/tile/kernel/pci.c206
-rw-r--r--arch/tile/kernel/process.c52
-rw-r--r--arch/tile/kernel/regs_64.S145
-rw-r--r--arch/tile/kernel/setup.c6
-rw-r--r--arch/tile/kernel/signal.c128
-rw-r--r--arch/tile/kernel/single_step.c12
-rw-r--r--arch/tile/kernel/stack.c14
-rw-r--r--arch/tile/kernel/sys.c9
-rw-r--r--arch/tile/kernel/tile-desc_32.c11
-rw-r--r--arch/tile/kernel/tile-desc_64.c2200
-rw-r--r--arch/tile/kernel/time.c2
-rw-r--r--arch/tile/kernel/tlb.c12
-rw-r--r--arch/tile/kernel/traps.c1
-rw-r--r--arch/tile/lib/atomic_asm_32.S2
-rw-r--r--arch/tile/lib/cacheflush.c18
-rw-r--r--arch/tile/lib/memchr_64.c71
-rw-r--r--arch/tile/lib/memcpy_64.c220
-rw-r--r--arch/tile/lib/memcpy_user_64.c86
-rw-r--r--arch/tile/lib/memset_64.c145
-rw-r--r--arch/tile/lib/spinlock_64.c104
-rw-r--r--arch/tile/lib/strchr_64.c67
-rw-r--r--arch/tile/lib/strlen_64.c38
-rw-r--r--arch/tile/lib/usercopy_64.S196
-rw-r--r--arch/tile/mm/fault.c30
-rw-r--r--arch/tile/mm/init.c2
-rw-r--r--arch/tile/mm/migrate_64.S187
-rw-r--r--arch/um/Kconfig.debug16
-rw-r--r--arch/um/drivers/Makefile4
-rw-r--r--arch/um/drivers/mcast.h24
-rw-r--r--arch/um/drivers/mcast_kern.c120
-rw-r--r--arch/um/drivers/mcast_user.c165
-rw-r--r--arch/um/drivers/umcast.h27
-rw-r--r--arch/um/drivers/umcast_kern.c188
-rw-r--r--arch/um/drivers/umcast_user.c186
-rw-r--r--arch/um/drivers/xterm.c2
-rw-r--r--arch/um/include/asm/processor-generic.h2
-rw-r--r--arch/um/include/asm/smp.h1
-rw-r--r--arch/um/include/asm/tlb.h29
-rw-r--r--arch/um/include/shared/os.h7
-rw-r--r--arch/um/kernel/Makefile1
-rw-r--r--arch/um/kernel/early_printk.c33
-rw-r--r--arch/um/kernel/smp.c3
-rw-r--r--arch/um/kernel/trap.c24
-rw-r--r--arch/um/os-Linux/main.c3
-rw-r--r--arch/um/os-Linux/process.c1
-rw-r--r--arch/um/os-Linux/util.c5
-rw-r--r--arch/unicore32/Kconfig.debug7
-rw-r--r--arch/unicore32/mm/init.c2
-rw-r--r--arch/unicore32/mm/mmu.c2
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/Kconfig.debug20
-rw-r--r--arch/x86/include/asm/io.h24
-rw-r--r--arch/x86/kernel/setup.c2
-rw-r--r--arch/x86/kernel/tboot.c1
-rw-r--r--arch/x86/kvm/mmu.c3
-rw-r--r--arch/x86/mm/fault.c12
-rw-r--r--arch/x86/mm/hugetlbpage.c4
-rw-r--r--arch/x86/mm/init.c2
-rw-r--r--arch/xtensa/include/asm/page.h4
-rw-r--r--arch/xtensa/mm/mmu.c2
-rw-r--r--arch/xtensa/mm/pgtable.c72
-rw-r--r--block/blk-cgroup.c200
-rw-r--r--block/blk-cgroup.h40
-rw-r--r--block/blk-core.c32
-rw-r--r--block/blk-exec.c2
-rw-r--r--block/blk-flush.c16
-rw-r--r--block/blk-ioc.c3
-rw-r--r--block/blk-lib.c82
-rw-r--r--block/blk-settings.c9
-rw-r--r--block/blk-sysfs.c3
-rw-r--r--block/blk-throttle.c313
-rw-r--r--block/blk.h23
-rw-r--r--block/cfq-iosched.c232
-rw-r--r--block/elevator.c11
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile3
-rw-r--r--drivers/acpi/apei/einj.c8
-rw-r--r--drivers/acpi/atomicio.c4
-rw-r--r--drivers/ata/libata-scsi.c13
-rw-r--r--drivers/base/node.c14
-rw-r--r--drivers/block/Kconfig21
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/cciss.c571
-rw-r--r--drivers/block/cciss.h11
-rw-r--r--drivers/block/cciss_cmd.h11
-rw-r--r--drivers/block/cciss_scsi.c41
-rw-r--r--drivers/block/cciss_scsi.h4
-rw-r--r--drivers/block/drbd/drbd_actlog.c2
-rw-r--r--drivers/block/drbd/drbd_bitmap.c6
-rw-r--r--drivers/block/drbd/drbd_int.h19
-rw-r--r--drivers/block/drbd/drbd_main.c37
-rw-r--r--drivers/block/drbd/drbd_nl.c127
-rw-r--r--drivers/block/drbd/drbd_receiver.c68
-rw-r--r--drivers/block/drbd/drbd_req.c20
-rw-r--r--drivers/block/drbd/drbd_req.h5
-rw-r--r--drivers/block/drbd/drbd_worker.c98
-rw-r--r--drivers/block/loop.c11
-rw-r--r--drivers/block/paride/pcd.c2
-rw-r--r--drivers/block/rbd.c27
-rw-r--r--drivers/block/xen-blkback/Makefile3
-rw-r--r--drivers/block/xen-blkback/blkback.c824
-rw-r--r--drivers/block/xen-blkback/common.h233
-rw-r--r--drivers/block/xen-blkback/xenbus.c768
-rw-r--r--drivers/block/xen-blkfront.c51
-rw-r--r--drivers/cdrom/viocd.c4
-rw-r--r--drivers/cpufreq/Makefile2
-rw-r--r--drivers/cpufreq/db8500-cpufreq.c169
-rw-r--r--drivers/dma/shdma.c42
-rw-r--r--drivers/dma/shdma.h2
-rw-r--r--drivers/edac/i3200_edac.c13
-rw-r--r--drivers/gpio/ml_ioh_gpio.c3
-rw-r--r--drivers/gpio/vx855_gpio.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c9
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c4
-rw-r--r--drivers/ide/ide-cd.c3
-rw-r--r--drivers/input/input-compat.h2
-rw-r--r--drivers/leds/Kconfig24
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/led-class.c3
-rw-r--r--drivers/leds/leds-gpio-register.c42
-rw-r--r--drivers/leds/leds-h1940.c170
-rw-r--r--drivers/leds/leds-lm3530.c73
-rw-r--r--drivers/leds/leds-pca9532.c191
-rw-r--r--drivers/leds/leds.h7
-rw-r--r--drivers/leds/ledtrig-timer.c3
-rw-r--r--drivers/media/video/omap/omap_vout.c2
-rw-r--r--drivers/media/video/omap/omap_voutdef.h2
-rw-r--r--drivers/mfd/Kconfig22
-rw-r--r--drivers/mfd/Makefile5
-rw-r--r--drivers/mfd/ab8500-i2c.c3
-rw-r--r--drivers/mfd/db5500-prcmu-regs.h (renamed from arch/arm/mach-ux500/include/mach/prcmu-regs.h)27
-rw-r--r--drivers/mfd/db5500-prcmu.c448
-rw-r--r--drivers/mfd/db8500-prcmu-regs.h166
-rw-r--r--drivers/mfd/db8500-prcmu.c2069
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/arm/ixp4xx_eth.c195
-rw-r--r--drivers/net/gianfar_ptp.c588
-rw-r--r--drivers/net/ioc3-eth.c2
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/dp83640.c1100
-rw-r--r--drivers/net/phy/dp83640_reg.h267
-rw-r--r--drivers/net/tile/tilepro.c8
-rw-r--r--drivers/platform/x86/ibm_rtl.c13
-rw-r--r--drivers/platform/x86/intel_ips.c13
-rw-r--r--drivers/ptp/Kconfig75
-rw-r--r--drivers/ptp/Makefile7
-rw-r--r--drivers/ptp/ptp_chardev.c159
-rw-r--r--drivers/ptp/ptp_clock.c343
-rw-r--r--drivers/ptp/ptp_ixp46x.c332
-rw-r--r--drivers/ptp/ptp_private.h92
-rw-r--r--drivers/ptp/ptp_sysfs.c230
-rw-r--r--drivers/regulator/Kconfig7
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/db8500-prcmu.c558
-rw-r--r--drivers/rtc/Kconfig7
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-tile.c162
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c21
-rw-r--r--drivers/scsi/sr.c2
-rw-r--r--drivers/staging/zcache/zcache.c5
-rw-r--r--drivers/tty/serial/68328serial.c2
-rw-r--r--drivers/tty/serial/pch_uart.c1
-rw-r--r--drivers/video/Kconfig42
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/amifb.c27
-rw-r--r--drivers/video/backlight/adp5520_bl.c6
-rw-r--r--drivers/video/da8xx-fb.c4
-rw-r--r--drivers/video/efifb.c4
-rw-r--r--drivers/video/mb862xx/Makefile5
-rw-r--r--drivers/video/mb862xx/mb862xx-i2c.c177
-rw-r--r--drivers/video/mb862xx/mb862xx_reg.h58
-rw-r--r--drivers/video/mb862xx/mb862xxfb.h36
-rw-r--r--drivers/video/mb862xx/mb862xxfbdrv.c (renamed from drivers/video/mb862xx/mb862xxfb.c)152
-rw-r--r--drivers/video/omap/dispc.c4
-rw-r--r--drivers/video/omap/omapfb_main.c2
-rw-r--r--drivers/video/omap/rfbi.c2
-rw-r--r--drivers/video/omap2/Makefile4
-rw-r--r--drivers/video/omap2/displays/Kconfig9
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c2
-rw-r--r--drivers/video/omap2/displays/panel-generic-dpi.c57
-rw-r--r--drivers/video/omap2/displays/panel-lgphilips-lb035q02.c2
-rw-r--r--drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c2
-rw-r--r--drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c6
-rw-r--r--drivers/video/omap2/displays/panel-taal.c536
-rw-r--r--drivers/video/omap2/displays/panel-tpo-td043mtea1.c10
-rw-r--r--drivers/video/omap2/dss/Kconfig33
-rw-r--r--drivers/video/omap2/dss/core.c15
-rw-r--r--drivers/video/omap2/dss/dispc.c1552
-rw-r--r--drivers/video/omap2/dss/dispc.h691
-rw-r--r--drivers/video/omap2/dss/display.c46
-rw-r--r--drivers/video/omap2/dss/dpi.c113
-rw-r--r--drivers/video/omap2/dss/dsi.c2367
-rw-r--r--drivers/video/omap2/dss/dss.c118
-rw-r--r--drivers/video/omap2/dss/dss.h98
-rw-r--r--drivers/video/omap2/dss/dss_features.c105
-rw-r--r--drivers/video/omap2/dss/dss_features.h39
-rw-r--r--drivers/video/omap2/dss/hdmi.c461
-rw-r--r--drivers/video/omap2/dss/hdmi.h222
-rw-r--r--drivers/video/omap2/dss/hdmi_omap4_panel.c2
-rw-r--r--drivers/video/omap2/dss/manager.c14
-rw-r--r--drivers/video/omap2/dss/overlay.c43
-rw-r--r--drivers/video/omap2/dss/rfbi.c176
-rw-r--r--drivers/video/omap2/dss/sdi.c2
-rw-r--r--drivers/video/omap2/dss/venc.c23
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c14
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c231
-rw-r--r--drivers/video/omap2/omapfb/omapfb-sysfs.c23
-rw-r--r--drivers/video/omap2/omapfb/omapfb.h8
-rw-r--r--drivers/video/s3c-fb.c121
-rw-r--r--drivers/video/s3c2410fb.c8
-rw-r--r--drivers/video/s3fb.c209
-rw-r--r--drivers/video/savage/savagefb-i2c.c2
-rw-r--r--drivers/video/savage/savagefb.h8
-rw-r--r--drivers/video/savage/savagefb_driver.c15
-rw-r--r--drivers/video/sh7760fb.c6
-rw-r--r--drivers/video/sh_mobile_hdmi.c10
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c126
-rw-r--r--drivers/video/sh_mobile_lcdcfb.h1
-rw-r--r--drivers/video/sh_mobile_meram.c567
-rw-r--r--drivers/video/sh_mobile_meram.h41
-rw-r--r--drivers/video/sm501fb.c24
-rw-r--r--drivers/video/udlfb.c20
-rw-r--r--fs/9p/Kconfig5
-rw-r--r--fs/9p/vfs_inode_dotl.c11
-rw-r--r--fs/Kconfig18
-rw-r--r--fs/block_dev.c17
-rw-r--r--fs/ceph/addr.c5
-rw-r--r--fs/ceph/caps.c61
-rw-r--r--fs/ceph/dir.c7
-rw-r--r--fs/ceph/export.c25
-rw-r--r--fs/ceph/mds_client.c7
-rw-r--r--fs/ceph/mds_client.h1
-rw-r--r--fs/dcache.c8
-rw-r--r--fs/drop_caches.c5
-rw-r--r--fs/exec.c12
-rw-r--r--fs/fscache/operation.c10
-rw-r--r--fs/fscache/page.c13
-rw-r--r--fs/gfs2/glock.c5
-rw-r--r--fs/gfs2/quota.c12
-rw-r--r--fs/gfs2/quota.h4
-rw-r--r--fs/hugetlbfs/inode.c4
-rw-r--r--fs/inode.c9
-rw-r--r--fs/mbcache.c10
-rw-r--r--fs/ncpfs/inode.c4
-rw-r--r--fs/nfs/dir.c5
-rw-r--r--fs/nfs/internal.h2
-rw-r--r--fs/partitions/check.c8
-rw-r--r--fs/proc/internal.h8
-rw-r--r--fs/proc/task_mmu.c204
-rw-r--r--fs/quota/dquot.c5
-rw-r--r--fs/splice.c33
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c4
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.c5
-rw-r--r--fs/xfs/quota/xfs_qm.c6
-rw-r--r--include/asm-generic/audit_change_attr.h4
-rw-r--r--include/asm-generic/audit_dir_write.h14
-rw-r--r--include/asm-generic/audit_read.h5
-rw-r--r--include/asm-generic/audit_write.h2
-rw-r--r--include/asm-generic/cacheflush.h5
-rw-r--r--include/asm-generic/resource.h2
-rw-r--r--include/asm-generic/tlb.h156
-rw-r--r--include/asm-generic/unistd.h221
-rw-r--r--include/linux/Kbuild1
-rw-r--r--include/linux/bitmap.h5
-rw-r--r--include/linux/blk_types.h2
-rw-r--r--include/linux/blkdev.h15
-rw-r--r--include/linux/bootmem.h25
-rw-r--r--include/linux/c2port.h3
-rw-r--r--include/linux/ceph/ceph_fs.h1
-rw-r--r--include/linux/compat.h236
-rw-r--r--include/linux/compiler-gcc.h4
-rw-r--r--include/linux/compiler-gcc4.h2
-rw-r--r--include/linux/cpumask.h15
-rw-r--r--include/linux/drbd.h10
-rw-r--r--include/linux/drbd_tag_magic.h2
-rw-r--r--include/linux/fs.h7
-rw-r--r--include/linux/fscache-cache.h12
-rw-r--r--include/linux/genalloc.h25
-rw-r--r--include/linux/genhd.h2
-rw-r--r--include/linux/gfp.h9
-rw-r--r--include/linux/huge_mm.h8
-rw-r--r--include/linux/kernel.h39
-rw-r--r--include/linux/leds-pca9532.h3
-rw-r--r--include/linux/leds.h2
-rw-r--r--include/linux/lockdep.h3
-rw-r--r--include/linux/lru_cache.h12
-rw-r--r--include/linux/memblock.h9
-rw-r--r--include/linux/mempolicy.h7
-rw-r--r--include/linux/mfd/db5500-prcmu.h45
-rw-r--r--include/linux/mfd/db8500-prcmu.h978
-rw-r--r--include/linux/mm.h121
-rw-r--r--include/linux/mm_types.h19
-rw-r--r--include/linux/mmu_notifier.h2
-rw-r--r--include/linux/mmzone.h9
-rw-r--r--include/linux/mutex.h9
-rw-r--r--include/linux/oom.h2
-rw-r--r--include/linux/pagemap.h15
-rw-r--r--include/linux/percpu_counter.h6
-rw-r--r--include/linux/posix-timers.h1
-rw-r--r--include/linux/printk.h7
-rw-r--r--include/linux/proc_fs.h8
-rw-r--r--include/linux/ptp_classify.h7
-rw-r--r--include/linux/ptp_clock.h84
-rw-r--r--include/linux/ptp_clock_kernel.h139
-rw-r--r--include/linux/regulator/db8500-prcmu.h45
-rw-r--r--include/linux/rmap.h29
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/shmem_fs.h8
-rw-r--r--include/linux/vmstat.h7
-rw-r--r--include/linux/xattr.h8
-rw-r--r--include/net/9p/9p.h13
-rw-r--r--include/net/9p/client.h2
-rw-r--r--include/net/9p/transport.h3
-rw-r--r--include/video/omap-panel-generic-dpi.h (renamed from arch/arm/plat-omap/include/plat/panel-generic-dpi.h)8
-rw-r--r--include/video/omap-panel-nokia-dsi.h (renamed from arch/arm/plat-omap/include/plat/nokia-dsi-panel.h)14
-rw-r--r--include/video/omapdss.h (renamed from arch/arm/plat-omap/include/plat/display.h)114
-rw-r--r--include/video/sh_mobile_lcdc.h3
-rw-r--r--include/video/sh_mobile_meram.h68
-rw-r--r--include/xen/interface/io/blkif.h13
-rw-r--r--init/calibrate.c75
-rw-r--r--init/main.c3
-rw-r--r--kernel/compat.c8
-rw-r--r--kernel/fork.c42
-rw-r--r--kernel/hrtimer.c2
-rw-r--r--kernel/irq/proc.c54
-rw-r--r--kernel/mutex.c25
-rw-r--r--kernel/posix-timers.c25
-rw-r--r--kernel/printk.c87
-rw-r--r--kernel/sysctl.c2
-rw-r--r--lib/Kconfig.debug20
-rw-r--r--lib/audit.c2
-rw-r--r--lib/bitmap.c109
-rw-r--r--lib/genalloc.c45
-rw-r--r--lib/kstrtox.c26
-rw-r--r--lib/lru_cache.c2
-rw-r--r--lib/show_mem.c2
-rw-r--r--lib/vsprintf.c2
-rw-r--r--mm/backing-dev.c4
-rw-r--r--mm/filemap.c71
-rw-r--r--mm/filemap_xip.c4
-rw-r--r--mm/fremap.c4
-rw-r--r--mm/huge_memory.c25
-rw-r--r--mm/hugetlb.c14
-rw-r--r--mm/init-mm.c1
-rw-r--r--mm/internal.h4
-rw-r--r--mm/ksm.c7
-rw-r--r--mm/memcontrol.c13
-rw-r--r--mm/memory-failure.c21
-rw-r--r--mm/memory.c440
-rw-r--r--mm/memory_hotplug.c21
-rw-r--r--mm/mempolicy.c164
-rw-r--r--mm/migrate.c17
-rw-r--r--mm/mmap.c121
-rw-r--r--mm/mremap.c5
-rw-r--r--mm/nobootmem.c23
-rw-r--r--mm/nommu.c108
-rw-r--r--mm/oom_kill.c36
-rw-r--r--mm/page_alloc.c121
-rw-r--r--mm/readahead.c2
-rw-r--r--mm/rmap.c172
-rw-r--r--mm/shmem.c320
-rw-r--r--mm/slub.c3
-rw-r--r--mm/swap.c52
-rw-r--r--mm/swapfile.c6
-rw-r--r--mm/util.c24
-rw-r--r--mm/vmalloc.c15
-rw-r--r--mm/vmscan.c80
-rw-r--r--mm/vmstat.c264
-rw-r--r--net/9p/Kconfig8
-rw-r--r--net/9p/client.c30
-rw-r--r--net/9p/mod.c4
-rw-r--r--net/9p/trans_fd.c7
-rw-r--r--net/9p/util.c2
-rw-r--r--net/ceph/messenger.c82
-rw-r--r--net/ceph/osd_client.c19
-rw-r--r--net/ceph/osdmap.c13
-rw-r--r--net/sunrpc/auth.c4
-rwxr-xr-xscripts/checkpatch.pl13
-rwxr-xr-xscripts/checkversion.pl1
-rw-r--r--scripts/export_report.pl26
-rw-r--r--scripts/package/Makefile4
-rwxr-xr-xscripts/package/mkspec19
-rwxr-xr-xscripts/patch-kernel2
640 files changed, 41157 insertions, 10167 deletions
diff --git a/.mailmap b/.mailmap
index 5a6dd592eedc..353ad5607156 100644
--- a/.mailmap
+++ b/.mailmap
@@ -32,6 +32,7 @@ Brian Avery <b.avery@hp.com>
32Brian King <brking@us.ibm.com> 32Brian King <brking@us.ibm.com>
33Christoph Hellwig <hch@lst.de> 33Christoph Hellwig <hch@lst.de>
34Corey Minyard <minyard@acm.org> 34Corey Minyard <minyard@acm.org>
35Damian Hobson-Garcia <dhobsong@igel.co.jp>
35David Brownell <david-b@pacbell.net> 36David Brownell <david-b@pacbell.net>
36David Woodhouse <dwmw2@shinybook.infradead.org> 37David Woodhouse <dwmw2@shinybook.infradead.org>
37Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> 38Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
diff --git a/CREDITS b/CREDITS
index 95c469c610bc..58d2a02add39 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2943,6 +2943,10 @@ S: Kasarmikatu 11 A4
2943S: 70110 Kuopio 2943S: 70110 Kuopio
2944S: Finland 2944S: Finland
2945 2945
2946N: Tobias Ringström
2947E: tori@unhappy.mine.nu
2948D: Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver
2949
2946N: Luca Risolia 2950N: Luca Risolia
2947E: luca.risolia@studio.unibo.it 2951E: luca.risolia@studio.unibo.it
2948P: 1024D/FCE635A4 88E8 F32F 7244 68BA 3958 5D40 99DA 5D2A FCE6 35A4 2952P: 1024D/FCE635A4 88E8 F32F 7244 68BA 3958 5D40 99DA 5D2A FCE6 35A4
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
index 4873c759d535..c1eb41cb9876 100644
--- a/Documentation/ABI/testing/sysfs-block
+++ b/Documentation/ABI/testing/sysfs-block
@@ -142,3 +142,67 @@ Description:
142 with the previous I/O request are enabled. When set to 2, 142 with the previous I/O request are enabled. When set to 2,
143 all merge tries are disabled. The default value is 0 - 143 all merge tries are disabled. The default value is 0 -
144 which enables all types of merge tries. 144 which enables all types of merge tries.
145
146What: /sys/block/<disk>/discard_alignment
147Date: May 2011
148Contact: Martin K. Petersen <martin.petersen@oracle.com>
149Description:
150 Devices that support discard functionality may
151 internally allocate space in units that are bigger than
152 the exported logical block size. The discard_alignment
153 parameter indicates how many bytes the beginning of the
154 device is offset from the internal allocation unit's
155 natural alignment.
156
157What: /sys/block/<disk>/<partition>/discard_alignment
158Date: May 2011
159Contact: Martin K. Petersen <martin.petersen@oracle.com>
160Description:
161 Devices that support discard functionality may
162 internally allocate space in units that are bigger than
163 the exported logical block size. The discard_alignment
164 parameter indicates how many bytes the beginning of the
165 partition is offset from the internal allocation unit's
166 natural alignment.
167
168What: /sys/block/<disk>/queue/discard_granularity
169Date: May 2011
170Contact: Martin K. Petersen <martin.petersen@oracle.com>
171Description:
172 Devices that support discard functionality may
173 internally allocate space using units that are bigger
174 than the logical block size. The discard_granularity
175 parameter indicates the size of the internal allocation
176 unit in bytes if reported by the device. Otherwise the
177 discard_granularity will be set to match the device's
178 physical block size. A discard_granularity of 0 means
179 that the device does not support discard functionality.
180
181What: /sys/block/<disk>/queue/discard_max_bytes
182Date: May 2011
183Contact: Martin K. Petersen <martin.petersen@oracle.com>
184Description:
185 Devices that support discard functionality may have
186 internal limits on the number of bytes that can be
187 trimmed or unmapped in a single operation. Some storage
188 protocols also have inherent limits on the number of
189 blocks that can be described in a single command. The
190 discard_max_bytes parameter is set by the device driver
191 to the maximum number of bytes that can be discarded in
192 a single operation. Discard requests issued to the
193 device must not exceed this limit. A discard_max_bytes
194 value of 0 means that the device does not support
195 discard functionality.
196
197What: /sys/block/<disk>/queue/discard_zeroes_data
198Date: May 2011
199Contact: Martin K. Petersen <martin.petersen@oracle.com>
200Description:
201 Devices that support discard functionality may return
202 stale or random data when a previously discarded block
203 is read back. This can cause problems if the filesystem
204 expects discarded blocks to be explicitly cleared. If a
205 device reports that it deterministically returns zeroes
206 when a discarded area is read the discard_zeroes_data
207 parameter will be set to one. Otherwise it will be 0 and
208 the result of reading a discarded area is undefined.
diff --git a/Documentation/ABI/testing/sysfs-ptp b/Documentation/ABI/testing/sysfs-ptp
new file mode 100644
index 000000000000..d40d2b550502
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-ptp
@@ -0,0 +1,98 @@
1What: /sys/class/ptp/
2Date: September 2010
3Contact: Richard Cochran <richardcochran@gmail.com>
4Description:
5 This directory contains files and directories
6 providing a standardized interface to the ancillary
7 features of PTP hardware clocks.
8
9What: /sys/class/ptp/ptpN/
10Date: September 2010
11Contact: Richard Cochran <richardcochran@gmail.com>
12Description:
13 This directory contains the attributes of the Nth PTP
14 hardware clock registered into the PTP class driver
15 subsystem.
16
17What: /sys/class/ptp/ptpN/clock_name
18Date: September 2010
19Contact: Richard Cochran <richardcochran@gmail.com>
20Description:
21 This file contains the name of the PTP hardware clock
22 as a human readable string.
23
24What: /sys/class/ptp/ptpN/max_adjustment
25Date: September 2010
26Contact: Richard Cochran <richardcochran@gmail.com>
27Description:
28 This file contains the PTP hardware clock's maximum
29 frequency adjustment value (a positive integer) in
30 parts per billion.
31
32What: /sys/class/ptp/ptpN/n_alarms
33Date: September 2010
34Contact: Richard Cochran <richardcochran@gmail.com>
35Description:
36 This file contains the number of periodic or one shot
37 alarms offered by the PTP hardware clock.
38
39What: /sys/class/ptp/ptpN/n_external_timestamps
40Date: September 2010
41Contact: Richard Cochran <richardcochran@gmail.com>
42Description:
43 This file contains the number of external timestamp
44 channels offered by the PTP hardware clock.
45
46What: /sys/class/ptp/ptpN/n_periodic_outputs
47Date: September 2010
48Contact: Richard Cochran <richardcochran@gmail.com>
49Description:
50 This file contains the number of programmable periodic
51 output channels offered by the PTP hardware clock.
52
53What: /sys/class/ptp/ptpN/pps_available
54Date: September 2010
55Contact: Richard Cochran <richardcochran@gmail.com>
56Description:
57 This file indicates whether the PTP hardware clock
58 supports a Pulse Per Second to the host CPU. Reading
59 "1" means that the PPS is supported, while "0" means
60 not supported.
61
62What: /sys/class/ptp/ptpN/extts_enable
63Date: September 2010
64Contact: Richard Cochran <richardcochran@gmail.com>
65Description:
66 This write-only file enables or disables external
67 timestamps. To enable external timestamps, write the
68 channel index followed by a "1" into the file.
69 To disable external timestamps, write the channel
70 index followed by a "0" into the file.
71
72What: /sys/class/ptp/ptpN/fifo
73Date: September 2010
74Contact: Richard Cochran <richardcochran@gmail.com>
75Description:
76 This file provides timestamps on external events, in
77 the form of three integers: channel index, seconds,
78 and nanoseconds.
79
80What: /sys/class/ptp/ptpN/period
81Date: September 2010
82Contact: Richard Cochran <richardcochran@gmail.com>
83Description:
84 This write-only file enables or disables periodic
85 outputs. To enable a periodic output, write five
86 integers into the file: channel index, start time
87 seconds, start time nanoseconds, period seconds, and
88 period nanoseconds. To disable a periodic output, set
89 all the seconds and nanoseconds values to zero.
90
91What: /sys/class/ptp/ptpN/pps_enable
92Date: September 2010
93Contact: Richard Cochran <richardcochran@gmail.com>
94Description:
95 This write-only file enables or disables delivery of
96 PPS events to the Linux PPS subsystem. To enable PPS
97 events, write a "1" into the file. To disable events,
98 write a "0" into the file.
diff --git a/Documentation/IRQ-affinity.txt b/Documentation/IRQ-affinity.txt
index b4a615b78403..7890fae18529 100644
--- a/Documentation/IRQ-affinity.txt
+++ b/Documentation/IRQ-affinity.txt
@@ -4,10 +4,11 @@ ChangeLog:
4 4
5SMP IRQ affinity 5SMP IRQ affinity
6 6
7/proc/irq/IRQ#/smp_affinity specifies which target CPUs are permitted 7/proc/irq/IRQ#/smp_affinity and /proc/irq/IRQ#/smp_affinity_list specify
8for a given IRQ source. It's a bitmask of allowed CPUs. It's not allowed 8which target CPUs are permitted for a given IRQ source. It's a bitmask
9to turn off all CPUs, and if an IRQ controller does not support IRQ 9(smp_affinity) or cpu list (smp_affinity_list) of allowed CPUs. It's not
10affinity then the value will not change from the default 0xffffffff. 10allowed to turn off all CPUs, and if an IRQ controller does not support
11IRQ affinity then the value will not change from the default of all cpus.
11 12
12/proc/irq/default_smp_affinity specifies default affinity mask that applies 13/proc/irq/default_smp_affinity specifies default affinity mask that applies
13to all non-active IRQs. Once IRQ is allocated/activated its affinity bitmask 14to all non-active IRQs. Once IRQ is allocated/activated its affinity bitmask
@@ -54,3 +55,11 @@ round-trip min/avg/max = 0.1/0.5/585.4 ms
54This time around IRQ44 was delivered only to the last four processors. 55This time around IRQ44 was delivered only to the last four processors.
55i.e counters for the CPU0-3 did not change. 56i.e counters for the CPU0-3 did not change.
56 57
58Here is an example of limiting that same irq (44) to cpus 1024 to 1031:
59
60[root@moon 44]# echo 1024-1031 > smp_affinity
61[root@moon 44]# cat smp_affinity
621024-1031
63
64Note that to do this with a bitmask would require 32 bitmasks of zero
65to follow the pertinent one.
diff --git a/Documentation/blockdev/cciss.txt b/Documentation/blockdev/cciss.txt
index 89698e8df7d4..c00c6a5ab21f 100644
--- a/Documentation/blockdev/cciss.txt
+++ b/Documentation/blockdev/cciss.txt
@@ -169,3 +169,18 @@ is issued which positions the tape to a known position. Typically you
169must rewind the tape (by issuing "mt -f /dev/st0 rewind" for example) 169must rewind the tape (by issuing "mt -f /dev/st0 rewind" for example)
170before i/o can proceed again to a tape drive which was reset. 170before i/o can proceed again to a tape drive which was reset.
171 171
172There is a cciss_tape_cmds module parameter which can be used to make cciss
173allocate more commands for use by tape drives. Ordinarily only a few commands
174(6) are allocated for tape drives because tape drives are slow and
175infrequently used and the primary purpose of Smart Array controllers is to
176act as a RAID controller for disk drives, so the vast majority of commands
177are allocated for disk devices. However, if you have more than a few tape
178drives attached to a smart array, the default number of commands may not be
179enough (for example, if you have 8 tape drives, you could only rewind 6
180at one time with the default number of commands.) The cciss_tape_cmds module
181parameter allows more commands (up to 16 more) to be allocated for use by
182tape drives. For example:
183
184 insmod cciss.ko cciss_tape_cmds=16
185
186Or, as a kernel boot parameter passed in via grub: cciss.cciss_tape_cmds=8
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index 9164ae3b83bc..9b728dc17535 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -16,7 +16,7 @@ on all processors in the system. Don't let this scare you into
16thinking SMP cache/tlb flushing must be so inefficient, this is in 16thinking SMP cache/tlb flushing must be so inefficient, this is in
17fact an area where many optimizations are possible. For example, 17fact an area where many optimizations are possible. For example,
18if it can be proven that a user address space has never executed 18if it can be proven that a user address space has never executed
19on a cpu (see vma->cpu_vm_mask), one need not perform a flush 19on a cpu (see mm_cpumask()), one need not perform a flush
20for this address space on that cpu. 20for this address space on that cpu.
21 21
22First, the TLB flushing interfaces, since they are the simplest. The 22First, the TLB flushing interfaces, since they are the simplest. The
diff --git a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
index edb7ae19e868..2c6be0377f55 100644
--- a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+++ b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
@@ -74,3 +74,57 @@ Example:
74 interrupt-parent = <&mpic>; 74 interrupt-parent = <&mpic>;
75 phy-handle = <&phy0> 75 phy-handle = <&phy0>
76 }; 76 };
77
78* Gianfar PTP clock nodes
79
80General Properties:
81
82 - compatible Should be "fsl,etsec-ptp"
83 - reg Offset and length of the register set for the device
84 - interrupts There should be at least two interrupts. Some devices
85 have as many as four PTP related interrupts.
86
87Clock Properties:
88
89 - fsl,tclk-period Timer reference clock period in nanoseconds.
90 - fsl,tmr-prsc Prescaler, divides the output clock.
91 - fsl,tmr-add Frequency compensation value.
92 - fsl,tmr-fiper1 Fixed interval period pulse generator.
93 - fsl,tmr-fiper2 Fixed interval period pulse generator.
94 - fsl,max-adj Maximum frequency adjustment in parts per billion.
95
96 These properties set the operational parameters for the PTP
97 clock. You must choose these carefully for the clock to work right.
98 Here is how to figure good values:
99
100 TimerOsc = system clock MHz
101 tclk_period = desired clock period nanoseconds
102 NominalFreq = 1000 / tclk_period MHz
103 FreqDivRatio = TimerOsc / NominalFreq (must be greater than 1.0)
104 tmr_add = ceil(2^32 / FreqDivRatio)
105 OutputClock = NominalFreq / tmr_prsc MHz
106 PulseWidth = 1 / OutputClock microseconds
107 FiperFreq1 = desired frequency in Hz
108 FiperDiv1 = 1000000 * OutputClock / FiperFreq1
109 tmr_fiper1 = tmr_prsc * tclk_period * FiperDiv1 - tclk_period
110 max_adj = 1000000000 * (FreqDivRatio - 1.0) - 1
111
112 The calculation for tmr_fiper2 is the same as for tmr_fiper1. The
113 driver expects that tmr_fiper1 will be correctly set to produce a 1
114 Pulse Per Second (PPS) signal, since this will be offered to the PPS
115 subsystem to synchronize the Linux clock.
116
117Example:
118
119 ptp_clock@24E00 {
120 compatible = "fsl,etsec-ptp";
121 reg = <0x24E00 0xB0>;
122 interrupts = <12 0x8 13 0x8>;
123 interrupt-parent = < &ipic >;
124 fsl,tclk-period = <10>;
125 fsl,tmr-prsc = <100>;
126 fsl,tmr-add = <0x999999A4>;
127 fsl,tmr-fiper1 = <0x3B9AC9F6>;
128 fsl,tmr-fiper2 = <0x00018696>;
129 fsl,max-adj = <659999998>;
130 };
diff --git a/Documentation/filesystems/9p.txt b/Documentation/filesystems/9p.txt
index b22abba78fed..13de64c7f0ab 100644
--- a/Documentation/filesystems/9p.txt
+++ b/Documentation/filesystems/9p.txt
@@ -25,6 +25,8 @@ Other applications are described in the following papers:
25 http://xcpu.org/papers/cellfs-talk.pdf 25 http://xcpu.org/papers/cellfs-talk.pdf
26 * PROSE I/O: Using 9p to enable Application Partitions 26 * PROSE I/O: Using 9p to enable Application Partitions
27 http://plan9.escet.urjc.es/iwp9/cready/PROSE_iwp9_2006.pdf 27 http://plan9.escet.urjc.es/iwp9/cready/PROSE_iwp9_2006.pdf
28 * VirtFS: A Virtualization Aware File System pass-through
29 http://goo.gl/3WPDg
28 30
29USAGE 31USAGE
30===== 32=====
@@ -130,31 +132,20 @@ OPTIONS
130RESOURCES 132RESOURCES
131========= 133=========
132 134
133Our current recommendation is to use Inferno (http://www.vitanuova.com/nferno/index.html) 135Protocol specifications are maintained on github:
134as the 9p server. You can start a 9p server under Inferno by issuing the 136http://ericvh.github.com/9p-rfc/
135following command:
136 ; styxlisten -A tcp!*!564 export '#U*'
137 137
138The -A specifies an unauthenticated export. The 564 is the port # (you may 1389p client and server implementations are listed on
139have to choose a higher port number if running as a normal user). The '#U*' 139http://9p.cat-v.org/implementations
140specifies exporting the root of the Linux name space. You may specify a
141subset of the namespace by extending the path: '#U*'/tmp would just export
142/tmp. For more information, see the Inferno manual pages covering styxlisten
143and export.
144 140
145A Linux version of the 9p server is now maintained under the npfs project 141A 9p2000.L server is being developed by LLNL and can be found
146on sourceforge (http://sourceforge.net/projects/npfs). The currently 142at http://code.google.com/p/diod/
147maintained version is the single-threaded version of the server (named spfs)
148available from the same SVN repository.
149 143
150There are user and developer mailing lists available through the v9fs project 144There are user and developer mailing lists available through the v9fs project
151on sourceforge (http://sourceforge.net/projects/v9fs). 145on sourceforge (http://sourceforge.net/projects/v9fs).
152 146
153A stand-alone version of the module (which should build for any 2.6 kernel) 147News and other information is maintained on a Wiki.
154is available via (http://github.com/ericvh/9p-sac/tree/master) 148(http://sf.net/apps/mediawiki/v9fs/index.php).
155
156News and other information is maintained on SWiK (http://swik.net/v9fs)
157and the Wiki (http://sf.net/apps/mediawiki/v9fs/index.php).
158 149
159Bug reports may be issued through the kernel.org bugzilla 150Bug reports may be issued through the kernel.org bugzilla
160(http://bugzilla.kernel.org) 151(http://bugzilla.kernel.org)
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 60740e8ecb37..f48178024067 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -574,6 +574,12 @@ The contents of each smp_affinity file is the same by default:
574 > cat /proc/irq/0/smp_affinity 574 > cat /proc/irq/0/smp_affinity
575 ffffffff 575 ffffffff
576 576
577There is an alternate interface, smp_affinity_list which allows specifying
578a cpu range instead of a bitmask:
579
580 > cat /proc/irq/0/smp_affinity_list
581 1024-1031
582
577The default_smp_affinity mask applies to all non-active IRQs, which are the 583The default_smp_affinity mask applies to all non-active IRQs, which are the
578IRQs which have not yet been allocated/activated, and hence which lack a 584IRQs which have not yet been allocated/activated, and hence which lack a
579/proc/irq/[0-9]* directory. 585/proc/irq/[0-9]* directory.
@@ -583,12 +589,13 @@ reports itself as being attached. This hardware locality information does not
583include information about any possible driver locality preference. 589include information about any possible driver locality preference.
584 590
585prof_cpu_mask specifies which CPUs are to be profiled by the system wide 591prof_cpu_mask specifies which CPUs are to be profiled by the system wide
586profiler. Default value is ffffffff (all cpus). 592profiler. Default value is ffffffff (all cpus if there are only 32 of them).
587 593
588The way IRQs are routed is handled by the IO-APIC, and it's Round Robin 594The way IRQs are routed is handled by the IO-APIC, and it's Round Robin
589between all the CPUs which are allowed to handle it. As usual the kernel has 595between all the CPUs which are allowed to handle it. As usual the kernel has
590more info than you and does a better job than you, so the defaults are the 596more info than you and does a better job than you, so the defaults are the
591best choice for almost everyone. 597best choice for almost everyone. [Note this applies only to those IO-APIC's
598that support "Round Robin" interrupt distribution.]
592 599
593There are three more important subdirectories in /proc: net, scsi, and sys. 600There are three more important subdirectories in /proc: net, scsi, and sys.
594The general rule is that the contents, or even the existence of these 601The general rule is that the contents, or even the existence of these
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 7c6624e7a5cb..5438a2d7907f 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1777,9 +1777,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1777 1777
1778 nosoftlockup [KNL] Disable the soft-lockup detector. 1778 nosoftlockup [KNL] Disable the soft-lockup detector.
1779 1779
1780 noswapaccount [KNL] Disable accounting of swap in memory resource
1781 controller. (See Documentation/cgroups/memory.txt)
1782
1783 nosync [HW,M68K] Disables sync negotiation for all devices. 1780 nosync [HW,M68K] Disables sync negotiation for all devices.
1784 1781
1785 notsc [BUGS=X86-32] Disable Time Stamp Counter 1782 notsc [BUGS=X86-32] Disable Time Stamp Counter
diff --git a/Documentation/lockstat.txt b/Documentation/lockstat.txt
index 65f4c795015d..9c0a80d17a23 100644
--- a/Documentation/lockstat.txt
+++ b/Documentation/lockstat.txt
@@ -136,7 +136,7 @@ View the top contending locks:
136 dcache_lock: 1037 1161 0.38 45.32 774.51 6611 243371 0.15 306.48 77387.24 136 dcache_lock: 1037 1161 0.38 45.32 774.51 6611 243371 0.15 306.48 77387.24
137 &inode->i_mutex: 161 286 18446744073709 62882.54 1244614.55 3653 20598 18446744073709 62318.60 1693822.74 137 &inode->i_mutex: 161 286 18446744073709 62882.54 1244614.55 3653 20598 18446744073709 62318.60 1693822.74
138 &zone->lru_lock: 94 94 0.53 7.33 92.10 4366 32690 0.29 59.81 16350.06 138 &zone->lru_lock: 94 94 0.53 7.33 92.10 4366 32690 0.29 59.81 16350.06
139 &inode->i_data.i_mmap_lock: 79 79 0.40 3.77 53.03 11779 87755 0.28 116.93 29898.44 139 &inode->i_data.i_mmap_mutex: 79 79 0.40 3.77 53.03 11779 87755 0.28 116.93 29898.44
140 &q->__queue_lock: 48 50 0.52 31.62 86.31 774 13131 0.17 113.08 12277.52 140 &q->__queue_lock: 48 50 0.52 31.62 86.31 774 13131 0.17 113.08 12277.52
141 &rq->rq_lock_key: 43 47 0.74 68.50 170.63 3706 33929 0.22 107.99 17460.62 141 &rq->rq_lock_key: 43 47 0.74 68.50 170.63 3706 33929 0.22 107.99 17460.62
142 &rq->rq_lock_key#2: 39 46 0.75 6.68 49.03 2979 32292 0.17 125.17 17137.63 142 &rq->rq_lock_key#2: 39 46 0.75 6.68 49.03 2979 32292 0.17 125.17 17137.63
diff --git a/Documentation/ptp/ptp.txt b/Documentation/ptp/ptp.txt
new file mode 100644
index 000000000000..ae8fef86b832
--- /dev/null
+++ b/Documentation/ptp/ptp.txt
@@ -0,0 +1,89 @@
1
2* PTP hardware clock infrastructure for Linux
3
4 This patch set introduces support for IEEE 1588 PTP clocks in
5 Linux. Together with the SO_TIMESTAMPING socket options, this
6 presents a standardized method for developing PTP user space
7 programs, synchronizing Linux with external clocks, and using the
8 ancillary features of PTP hardware clocks.
9
10 A new class driver exports a kernel interface for specific clock
11 drivers and a user space interface. The infrastructure supports a
12 complete set of PTP hardware clock functionality.
13
14 + Basic clock operations
15 - Set time
16 - Get time
17 - Shift the clock by a given offset atomically
18 - Adjust clock frequency
19
20 + Ancillary clock features
21 - One shot or periodic alarms, with signal delivery to user program
22 - Time stamp external events
23 - Periodic output signals configurable from user space
24 - Synchronization of the Linux system time via the PPS subsystem
25
26** PTP hardware clock kernel API
27
28 A PTP clock driver registers itself with the class driver. The
29 class driver handles all of the dealings with user space. The
30 author of a clock driver need only implement the details of
31 programming the clock hardware. The clock driver notifies the class
32 driver of asynchronous events (alarms and external time stamps) via
33 a simple message passing interface.
34
35 The class driver supports multiple PTP clock drivers. In normal use
36 cases, only one PTP clock is needed. However, for testing and
37 development, it can be useful to have more than one clock in a
38 single system, in order to allow performance comparisons.
39
40** PTP hardware clock user space API
41
42 The class driver also creates a character device for each
43 registered clock. User space can use an open file descriptor from
44 the character device as a POSIX clock id and may call
45 clock_gettime, clock_settime, and clock_adjtime. These calls
46 implement the basic clock operations.
47
48 User space programs may control the clock using standardized
49 ioctls. A program may query, enable, configure, and disable the
50 ancillary clock features. User space can receive time stamped
51 events via blocking read() and poll(). One shot and periodic
52 signals may be configured via the POSIX timer_settime() system
53 call.
54
55** Writing clock drivers
56
57 Clock drivers include include/linux/ptp_clock_kernel.h and register
58 themselves by presenting a 'struct ptp_clock_info' to the
59 registration method. Clock drivers must implement all of the
60 functions in the interface. If a clock does not offer a particular
61 ancillary feature, then the driver should just return -EOPNOTSUPP
62 from those functions.
63
64 Drivers must ensure that all of the methods in the interface are
65 reentrant. Since most hardware implementations treat the time value
66 as a 64 bit integer accessed as two 32 bit registers, drivers
67 should use spin_lock_irqsave/spin_unlock_irqrestore to protect
68 against concurrent access. This locking cannot be accomplished in
69 class driver, since the lock may also be needed by the clock
70 driver's interrupt service routine.
71
72** Supported hardware
73
74 + Freescale eTSEC gianfar
75 - 2 Time stamp external triggers, programmable polarity (opt. interrupt)
76 - 2 Alarm registers (optional interrupt)
77 - 3 Periodic signals (optional interrupt)
78
79 + National DP83640
80 - 6 GPIOs programmable as inputs or outputs
81 - 6 GPIOs with dedicated functions (LED/JTAG/clock) can also be
82 used as general inputs or outputs
83 - GPIO inputs can time stamp external triggers
84 - GPIO outputs can produce periodic signals
85 - 1 interrupt pin
86
87 + Intel IXP465
88 - Auxiliary Slave/Master Mode Snapshot (optional interrupt)
89 - Target Time (optional interrupt)
diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c
new file mode 100644
index 000000000000..f59ded066108
--- /dev/null
+++ b/Documentation/ptp/testptp.c
@@ -0,0 +1,381 @@
1/*
2 * PTP 1588 clock support - User space test program
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#include <errno.h>
21#include <fcntl.h>
22#include <math.h>
23#include <signal.h>
24#include <stdio.h>
25#include <stdlib.h>
26#include <string.h>
27#include <sys/ioctl.h>
28#include <sys/mman.h>
29#include <sys/stat.h>
30#include <sys/time.h>
31#include <sys/timex.h>
32#include <sys/types.h>
33#include <time.h>
34#include <unistd.h>
35
36#include <linux/ptp_clock.h>
37
38#define DEVICE "/dev/ptp0"
39
40#ifndef ADJ_SETOFFSET
41#define ADJ_SETOFFSET 0x0100
42#endif
43
44#ifndef CLOCK_INVALID
45#define CLOCK_INVALID -1
46#endif
47
48/* When glibc offers the syscall, this will go away. */
49#include <sys/syscall.h>
50static int clock_adjtime(clockid_t id, struct timex *tx)
51{
52 return syscall(__NR_clock_adjtime, id, tx);
53}
54
55static clockid_t get_clockid(int fd)
56{
57#define CLOCKFD 3
58#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD)
59
60 return FD_TO_CLOCKID(fd);
61}
62
63static void handle_alarm(int s)
64{
65 printf("received signal %d\n", s);
66}
67
68static int install_handler(int signum, void (*handler)(int))
69{
70 struct sigaction action;
71 sigset_t mask;
72
73 /* Unblock the signal. */
74 sigemptyset(&mask);
75 sigaddset(&mask, signum);
76 sigprocmask(SIG_UNBLOCK, &mask, NULL);
77
78 /* Install the signal handler. */
79 action.sa_handler = handler;
80 action.sa_flags = 0;
81 sigemptyset(&action.sa_mask);
82 sigaction(signum, &action, NULL);
83
84 return 0;
85}
86
87static long ppb_to_scaled_ppm(int ppb)
88{
89 /*
90 * The 'freq' field in the 'struct timex' is in parts per
91 * million, but with a 16 bit binary fractional field.
92 * Instead of calculating either one of
93 *
94 * scaled_ppm = (ppb / 1000) << 16 [1]
95 * scaled_ppm = (ppb << 16) / 1000 [2]
96 *
97 * we simply use double precision math, in order to avoid the
98 * truncation in [1] and the possible overflow in [2].
99 */
100 return (long) (ppb * 65.536);
101}
102
103static void usage(char *progname)
104{
105 fprintf(stderr,
106 "usage: %s [options]\n"
107 " -a val request a one-shot alarm after 'val' seconds\n"
108 " -A val request a periodic alarm every 'val' seconds\n"
109 " -c query the ptp clock's capabilities\n"
110 " -d name device to open\n"
111 " -e val read 'val' external time stamp events\n"
112 " -f val adjust the ptp clock frequency by 'val' ppb\n"
113 " -g get the ptp clock time\n"
114 " -h prints this message\n"
115 " -p val enable output with a period of 'val' nanoseconds\n"
116 " -P val enable or disable (val=1|0) the system clock PPS\n"
117 " -s set the ptp clock time from the system time\n"
118 " -S set the system time from the ptp clock time\n"
119 " -t val shift the ptp clock time by 'val' seconds\n",
120 progname);
121}
122
123int main(int argc, char *argv[])
124{
125 struct ptp_clock_caps caps;
126 struct ptp_extts_event event;
127 struct ptp_extts_request extts_request;
128 struct ptp_perout_request perout_request;
129 struct timespec ts;
130 struct timex tx;
131
132 static timer_t timerid;
133 struct itimerspec timeout;
134 struct sigevent sigevent;
135
136 char *progname;
137 int c, cnt, fd;
138
139 char *device = DEVICE;
140 clockid_t clkid;
141 int adjfreq = 0x7fffffff;
142 int adjtime = 0;
143 int capabilities = 0;
144 int extts = 0;
145 int gettime = 0;
146 int oneshot = 0;
147 int periodic = 0;
148 int perout = -1;
149 int pps = -1;
150 int settime = 0;
151
152 progname = strrchr(argv[0], '/');
153 progname = progname ? 1+progname : argv[0];
154 while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghp:P:sSt:v"))) {
155 switch (c) {
156 case 'a':
157 oneshot = atoi(optarg);
158 break;
159 case 'A':
160 periodic = atoi(optarg);
161 break;
162 case 'c':
163 capabilities = 1;
164 break;
165 case 'd':
166 device = optarg;
167 break;
168 case 'e':
169 extts = atoi(optarg);
170 break;
171 case 'f':
172 adjfreq = atoi(optarg);
173 break;
174 case 'g':
175 gettime = 1;
176 break;
177 case 'p':
178 perout = atoi(optarg);
179 break;
180 case 'P':
181 pps = atoi(optarg);
182 break;
183 case 's':
184 settime = 1;
185 break;
186 case 'S':
187 settime = 2;
188 break;
189 case 't':
190 adjtime = atoi(optarg);
191 break;
192 case 'h':
193 usage(progname);
194 return 0;
195 case '?':
196 default:
197 usage(progname);
198 return -1;
199 }
200 }
201
202 fd = open(device, O_RDWR);
203 if (fd < 0) {
204 fprintf(stderr, "opening %s: %s\n", device, strerror(errno));
205 return -1;
206 }
207
208 clkid = get_clockid(fd);
209 if (CLOCK_INVALID == clkid) {
210 fprintf(stderr, "failed to read clock id\n");
211 return -1;
212 }
213
214 if (capabilities) {
215 if (ioctl(fd, PTP_CLOCK_GETCAPS, &caps)) {
216 perror("PTP_CLOCK_GETCAPS");
217 } else {
218 printf("capabilities:\n"
219 " %d maximum frequency adjustment (ppb)\n"
220 " %d programmable alarms\n"
221 " %d external time stamp channels\n"
222 " %d programmable periodic signals\n"
223 " %d pulse per second\n",
224 caps.max_adj,
225 caps.n_alarm,
226 caps.n_ext_ts,
227 caps.n_per_out,
228 caps.pps);
229 }
230 }
231
232 if (0x7fffffff != adjfreq) {
233 memset(&tx, 0, sizeof(tx));
234 tx.modes = ADJ_FREQUENCY;
235 tx.freq = ppb_to_scaled_ppm(adjfreq);
236 if (clock_adjtime(clkid, &tx)) {
237 perror("clock_adjtime");
238 } else {
239 puts("frequency adjustment okay");
240 }
241 }
242
243 if (adjtime) {
244 memset(&tx, 0, sizeof(tx));
245 tx.modes = ADJ_SETOFFSET;
246 tx.time.tv_sec = adjtime;
247 tx.time.tv_usec = 0;
248 if (clock_adjtime(clkid, &tx) < 0) {
249 perror("clock_adjtime");
250 } else {
251 puts("time shift okay");
252 }
253 }
254
255 if (gettime) {
256 if (clock_gettime(clkid, &ts)) {
257 perror("clock_gettime");
258 } else {
259 printf("clock time: %ld.%09ld or %s",
260 ts.tv_sec, ts.tv_nsec, ctime(&ts.tv_sec));
261 }
262 }
263
264 if (settime == 1) {
265 clock_gettime(CLOCK_REALTIME, &ts);
266 if (clock_settime(clkid, &ts)) {
267 perror("clock_settime");
268 } else {
269 puts("set time okay");
270 }
271 }
272
273 if (settime == 2) {
274 clock_gettime(clkid, &ts);
275 if (clock_settime(CLOCK_REALTIME, &ts)) {
276 perror("clock_settime");
277 } else {
278 puts("set time okay");
279 }
280 }
281
282 if (extts) {
283 memset(&extts_request, 0, sizeof(extts_request));
284 extts_request.index = 0;
285 extts_request.flags = PTP_ENABLE_FEATURE;
286 if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) {
287 perror("PTP_EXTTS_REQUEST");
288 extts = 0;
289 } else {
290 puts("external time stamp request okay");
291 }
292 for (; extts; extts--) {
293 cnt = read(fd, &event, sizeof(event));
294 if (cnt != sizeof(event)) {
295 perror("read");
296 break;
297 }
298 printf("event index %u at %lld.%09u\n", event.index,
299 event.t.sec, event.t.nsec);
300 fflush(stdout);
301 }
302 /* Disable the feature again. */
303 extts_request.flags = 0;
304 if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) {
305 perror("PTP_EXTTS_REQUEST");
306 }
307 }
308
309 if (oneshot) {
310 install_handler(SIGALRM, handle_alarm);
311 /* Create a timer. */
312 sigevent.sigev_notify = SIGEV_SIGNAL;
313 sigevent.sigev_signo = SIGALRM;
314 if (timer_create(clkid, &sigevent, &timerid)) {
315 perror("timer_create");
316 return -1;
317 }
318 /* Start the timer. */
319 memset(&timeout, 0, sizeof(timeout));
320 timeout.it_value.tv_sec = oneshot;
321 if (timer_settime(timerid, 0, &timeout, NULL)) {
322 perror("timer_settime");
323 return -1;
324 }
325 pause();
326 timer_delete(timerid);
327 }
328
329 if (periodic) {
330 install_handler(SIGALRM, handle_alarm);
331 /* Create a timer. */
332 sigevent.sigev_notify = SIGEV_SIGNAL;
333 sigevent.sigev_signo = SIGALRM;
334 if (timer_create(clkid, &sigevent, &timerid)) {
335 perror("timer_create");
336 return -1;
337 }
338 /* Start the timer. */
339 memset(&timeout, 0, sizeof(timeout));
340 timeout.it_interval.tv_sec = periodic;
341 timeout.it_value.tv_sec = periodic;
342 if (timer_settime(timerid, 0, &timeout, NULL)) {
343 perror("timer_settime");
344 return -1;
345 }
346 while (1) {
347 pause();
348 }
349 timer_delete(timerid);
350 }
351
352 if (perout >= 0) {
353 if (clock_gettime(clkid, &ts)) {
354 perror("clock_gettime");
355 return -1;
356 }
357 memset(&perout_request, 0, sizeof(perout_request));
358 perout_request.index = 0;
359 perout_request.start.sec = ts.tv_sec + 2;
360 perout_request.start.nsec = 0;
361 perout_request.period.sec = 0;
362 perout_request.period.nsec = perout;
363 if (ioctl(fd, PTP_PEROUT_REQUEST, &perout_request)) {
364 perror("PTP_PEROUT_REQUEST");
365 } else {
366 puts("periodic output request okay");
367 }
368 }
369
370 if (pps != -1) {
371 int enable = pps ? 1 : 0;
372 if (ioctl(fd, PTP_ENABLE_PPS, enable)) {
373 perror("PTP_ENABLE_PPS");
374 } else {
375 puts("pps for system time request okay");
376 }
377 }
378
379 close(fd);
380 return 0;
381}
diff --git a/Documentation/ptp/testptp.mk b/Documentation/ptp/testptp.mk
new file mode 100644
index 000000000000..4ef2d9755421
--- /dev/null
+++ b/Documentation/ptp/testptp.mk
@@ -0,0 +1,33 @@
1# PTP 1588 clock support - User space test program
2#
3# Copyright (C) 2010 OMICRON electronics GmbH
4#
5# This program is free software; you can redistribute it and/or modify
6# it under the terms of the GNU General Public License as published by
7# the Free Software Foundation; either version 2 of the License, or
8# (at your option) any later version.
9#
10# This program is distributed in the hope that it will be useful,
11# but WITHOUT ANY WARRANTY; without even the implied warranty of
12# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13# GNU General Public License for more details.
14#
15# You should have received a copy of the GNU General Public License
16# along with this program; if not, write to the Free Software
17# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18
19CC = $(CROSS_COMPILE)gcc
20INC = -I$(KBUILD_OUTPUT)/usr/include
21CFLAGS = -Wall $(INC)
22LDLIBS = -lrt
23PROGS = testptp
24
25all: $(PROGS)
26
27testptp: testptp.o
28
29clean:
30 rm -f testptp.o
31
32distclean: clean
33 rm -f $(PROGS)
diff --git a/Documentation/virtual/uml/UserModeLinux-HOWTO.txt b/Documentation/virtual/uml/UserModeLinux-HOWTO.txt
index 9b7e1904db1c..5d0fc8bfcdb9 100644
--- a/Documentation/virtual/uml/UserModeLinux-HOWTO.txt
+++ b/Documentation/virtual/uml/UserModeLinux-HOWTO.txt
@@ -1182,6 +1182,16 @@
1182 forge.net/> and explains these in detail, as well as 1182 forge.net/> and explains these in detail, as well as
1183 some other issues. 1183 some other issues.
1184 1184
1185 There is also a related point-to-point only "ucast" transport.
1186 This is useful when your network does not support multicast, and
1187 all network connections are simple point to point links.
1188
1189 The full set of command line options for this transport are
1190
1191
1192 ethn=ucast,ethernet address,remote address,listen port,remote port
1193
1194
1185 1195
1186 1196
1187 66..66.. TTUUNN//TTAAPP wwiitthh tthhee uummll__nneett hheellppeerr 1197 66..66.. TTUUNN//TTAAPP wwiitthh tthhee uummll__nneett hheellppeerr
diff --git a/Documentation/vm/locking b/Documentation/vm/locking
index 25fadb448760..f61228bd6395 100644
--- a/Documentation/vm/locking
+++ b/Documentation/vm/locking
@@ -66,7 +66,7 @@ in some cases it is not really needed. Eg, vm_start is modified by
66expand_stack(), it is hard to come up with a destructive scenario without 66expand_stack(), it is hard to come up with a destructive scenario without
67having the vmlist protection in this case. 67having the vmlist protection in this case.
68 68
69The page_table_lock nests with the inode i_mmap_lock and the kmem cache 69The page_table_lock nests with the inode i_mmap_mutex and the kmem cache
70c_spinlock spinlocks. This is okay, since the kmem code asks for pages after 70c_spinlock spinlocks. This is okay, since the kmem code asks for pages after
71dropping c_spinlock. The page_table_lock also nests with pagecache_lock and 71dropping c_spinlock. The page_table_lock also nests with pagecache_lock and
72pagemap_lru_lock spinlocks, and no code asks for memory with these locks 72pagemap_lru_lock spinlocks, and no code asks for memory with these locks
diff --git a/MAINTAINERS b/MAINTAINERS
index 88a5bef09473..a26c9ee7703d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -287,35 +287,35 @@ F: sound/pci/ad1889.*
287 287
288AD525X ANALOG DEVICES DIGITAL POTENTIOMETERS DRIVER 288AD525X ANALOG DEVICES DIGITAL POTENTIOMETERS DRIVER
289M: Michael Hennerich <michael.hennerich@analog.com> 289M: Michael Hennerich <michael.hennerich@analog.com>
290L: device-driver-devel@blackfin.uclinux.org 290L: device-drivers-devel@blackfin.uclinux.org
291W: http://wiki.analog.com/AD5254 291W: http://wiki.analog.com/AD5254
292S: Supported 292S: Supported
293F: drivers/misc/ad525x_dpot.c 293F: drivers/misc/ad525x_dpot.c
294 294
295AD5398 CURRENT REGULATOR DRIVER (AD5398/AD5821) 295AD5398 CURRENT REGULATOR DRIVER (AD5398/AD5821)
296M: Michael Hennerich <michael.hennerich@analog.com> 296M: Michael Hennerich <michael.hennerich@analog.com>
297L: device-driver-devel@blackfin.uclinux.org 297L: device-drivers-devel@blackfin.uclinux.org
298W: http://wiki.analog.com/AD5398 298W: http://wiki.analog.com/AD5398
299S: Supported 299S: Supported
300F: drivers/regulator/ad5398.c 300F: drivers/regulator/ad5398.c
301 301
302AD714X CAPACITANCE TOUCH SENSOR DRIVER (AD7142/3/7/8/7A) 302AD714X CAPACITANCE TOUCH SENSOR DRIVER (AD7142/3/7/8/7A)
303M: Michael Hennerich <michael.hennerich@analog.com> 303M: Michael Hennerich <michael.hennerich@analog.com>
304L: device-driver-devel@blackfin.uclinux.org 304L: device-drivers-devel@blackfin.uclinux.org
305W: http://wiki.analog.com/AD7142 305W: http://wiki.analog.com/AD7142
306S: Supported 306S: Supported
307F: drivers/input/misc/ad714x.c 307F: drivers/input/misc/ad714x.c
308 308
309AD7877 TOUCHSCREEN DRIVER 309AD7877 TOUCHSCREEN DRIVER
310M: Michael Hennerich <michael.hennerich@analog.com> 310M: Michael Hennerich <michael.hennerich@analog.com>
311L: device-driver-devel@blackfin.uclinux.org 311L: device-drivers-devel@blackfin.uclinux.org
312W: http://wiki.analog.com/AD7877 312W: http://wiki.analog.com/AD7877
313S: Supported 313S: Supported
314F: drivers/input/touchscreen/ad7877.c 314F: drivers/input/touchscreen/ad7877.c
315 315
316AD7879 TOUCHSCREEN DRIVER (AD7879/AD7889) 316AD7879 TOUCHSCREEN DRIVER (AD7879/AD7889)
317M: Michael Hennerich <michael.hennerich@analog.com> 317M: Michael Hennerich <michael.hennerich@analog.com>
318L: device-driver-devel@blackfin.uclinux.org 318L: device-drivers-devel@blackfin.uclinux.org
319W: http://wiki.analog.com/AD7879 319W: http://wiki.analog.com/AD7879
320S: Supported 320S: Supported
321F: drivers/input/touchscreen/ad7879.c 321F: drivers/input/touchscreen/ad7879.c
@@ -341,7 +341,7 @@ F: drivers/net/wireless/adm8211.*
341 341
342ADP5520 BACKLIGHT DRIVER WITH IO EXPANDER (ADP5520/ADP5501) 342ADP5520 BACKLIGHT DRIVER WITH IO EXPANDER (ADP5520/ADP5501)
343M: Michael Hennerich <michael.hennerich@analog.com> 343M: Michael Hennerich <michael.hennerich@analog.com>
344L: device-driver-devel@blackfin.uclinux.org 344L: device-drivers-devel@blackfin.uclinux.org
345W: http://wiki.analog.com/ADP5520 345W: http://wiki.analog.com/ADP5520
346S: Supported 346S: Supported
347F: drivers/mfd/adp5520.c 347F: drivers/mfd/adp5520.c
@@ -352,7 +352,7 @@ F: drivers/input/keyboard/adp5520-keys.c
352 352
353ADP5588 QWERTY KEYPAD AND IO EXPANDER DRIVER (ADP5588/ADP5587) 353ADP5588 QWERTY KEYPAD AND IO EXPANDER DRIVER (ADP5588/ADP5587)
354M: Michael Hennerich <michael.hennerich@analog.com> 354M: Michael Hennerich <michael.hennerich@analog.com>
355L: device-driver-devel@blackfin.uclinux.org 355L: device-drivers-devel@blackfin.uclinux.org
356W: http://wiki.analog.com/ADP5588 356W: http://wiki.analog.com/ADP5588
357S: Supported 357S: Supported
358F: drivers/input/keyboard/adp5588-keys.c 358F: drivers/input/keyboard/adp5588-keys.c
@@ -360,7 +360,7 @@ F: drivers/gpio/adp5588-gpio.c
360 360
361ADP8860 BACKLIGHT DRIVER (ADP8860/ADP8861/ADP8863) 361ADP8860 BACKLIGHT DRIVER (ADP8860/ADP8861/ADP8863)
362M: Michael Hennerich <michael.hennerich@analog.com> 362M: Michael Hennerich <michael.hennerich@analog.com>
363L: device-driver-devel@blackfin.uclinux.org 363L: device-drivers-devel@blackfin.uclinux.org
364W: http://wiki.analog.com/ADP8860 364W: http://wiki.analog.com/ADP8860
365S: Supported 365S: Supported
366F: drivers/video/backlight/adp8860_bl.c 366F: drivers/video/backlight/adp8860_bl.c
@@ -387,7 +387,7 @@ F: drivers/hwmon/adt7475.c
387 387
388ADXL34X THREE-AXIS DIGITAL ACCELEROMETER DRIVER (ADXL345/ADXL346) 388ADXL34X THREE-AXIS DIGITAL ACCELEROMETER DRIVER (ADXL345/ADXL346)
389M: Michael Hennerich <michael.hennerich@analog.com> 389M: Michael Hennerich <michael.hennerich@analog.com>
390L: device-driver-devel@blackfin.uclinux.org 390L: device-drivers-devel@blackfin.uclinux.org
391W: http://wiki.analog.com/ADXL345 391W: http://wiki.analog.com/ADXL345
392S: Supported 392S: Supported
393F: drivers/input/misc/adxl34x.c 393F: drivers/input/misc/adxl34x.c
@@ -533,7 +533,7 @@ S: Maintained
533F: drivers/infiniband/hw/amso1100/ 533F: drivers/infiniband/hw/amso1100/
534 534
535ANALOG DEVICES INC ASOC CODEC DRIVERS 535ANALOG DEVICES INC ASOC CODEC DRIVERS
536L: device-driver-devel@blackfin.uclinux.org 536L: device-drivers-devel@blackfin.uclinux.org
537L: alsa-devel@alsa-project.org (moderated for non-subscribers) 537L: alsa-devel@alsa-project.org (moderated for non-subscribers)
538W: http://wiki.analog.com/ 538W: http://wiki.analog.com/
539S: Supported 539S: Supported
@@ -2041,9 +2041,8 @@ F: net/ax25/ax25_timer.c
2041F: net/ax25/sysctl_net_ax25.c 2041F: net/ax25/sysctl_net_ax25.c
2042 2042
2043DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER 2043DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER
2044M: Tobias Ringstrom <tori@unhappy.mine.nu>
2045L: netdev@vger.kernel.org 2044L: netdev@vger.kernel.org
2046S: Maintained 2045S: Orphan
2047F: Documentation/networking/dmfe.txt 2046F: Documentation/networking/dmfe.txt
2048F: drivers/net/tulip/dmfe.c 2047F: drivers/net/tulip/dmfe.c
2049 2048
@@ -3905,7 +3904,6 @@ F: drivers/*/*/*pasemi*
3905LINUX SECURITY MODULE (LSM) FRAMEWORK 3904LINUX SECURITY MODULE (LSM) FRAMEWORK
3906M: Chris Wright <chrisw@sous-sol.org> 3905M: Chris Wright <chrisw@sous-sol.org>
3907L: linux-security-module@vger.kernel.org 3906L: linux-security-module@vger.kernel.org
3908T: git git://git.kernel.org/pub/scm/linux/kernel/git/chrisw/lsm-2.6.git
3909S: Supported 3907S: Supported
3910 3908
3911LIS3LV02D ACCELEROMETER DRIVER 3909LIS3LV02D ACCELEROMETER DRIVER
diff --git a/Makefile b/Makefile
index 6b73d1eed1ea..529d93fa2430 100644
--- a/Makefile
+++ b/Makefile
@@ -220,6 +220,14 @@ ifeq ($(ARCH),sh64)
220 SRCARCH := sh 220 SRCARCH := sh
221endif 221endif
222 222
223# Additional ARCH settings for tile
224ifeq ($(ARCH),tilepro)
225 SRCARCH := tile
226endif
227ifeq ($(ARCH),tilegx)
228 SRCARCH := tile
229endif
230
223# Where to locate arch specific headers 231# Where to locate arch specific headers
224hdr-arch := $(SRCARCH) 232hdr-arch := $(SRCARCH)
225 233
@@ -1009,7 +1017,8 @@ include/generated/utsrelease.h: include/config/kernel.release FORCE
1009 1017
1010PHONY += headerdep 1018PHONY += headerdep
1011headerdep: 1019headerdep:
1012 $(Q)find include/ -name '*.h' | xargs --max-args 1 scripts/headerdep.pl 1020 $(Q)find $(srctree)/include/ -name '*.h' | xargs --max-args 1 \
1021 $(srctree)/scripts/headerdep.pl -I$(srctree)/include
1013 1022
1014# --------------------------------------------------------------------------- 1023# ---------------------------------------------------------------------------
1015 1024
@@ -1417,13 +1426,15 @@ tags TAGS cscope gtags: FORCE
1417# Scripts to check various things for consistency 1426# Scripts to check various things for consistency
1418# --------------------------------------------------------------------------- 1427# ---------------------------------------------------------------------------
1419 1428
1429PHONY += includecheck versioncheck coccicheck namespacecheck export_report
1430
1420includecheck: 1431includecheck:
1421 find * $(RCS_FIND_IGNORE) \ 1432 find $(srctree)/* $(RCS_FIND_IGNORE) \
1422 -name '*.[hcS]' -type f -print | sort \ 1433 -name '*.[hcS]' -type f -print | sort \
1423 | xargs $(PERL) -w $(srctree)/scripts/checkincludes.pl 1434 | xargs $(PERL) -w $(srctree)/scripts/checkincludes.pl
1424 1435
1425versioncheck: 1436versioncheck:
1426 find * $(RCS_FIND_IGNORE) \ 1437 find $(srctree)/* $(RCS_FIND_IGNORE) \
1427 -name '*.[hcS]' -type f -print | sort \ 1438 -name '*.[hcS]' -type f -print | sort \
1428 | xargs $(PERL) -w $(srctree)/scripts/checkversion.pl 1439 | xargs $(PERL) -w $(srctree)/scripts/checkversion.pl
1429 1440
diff --git a/arch/Kconfig b/arch/Kconfig
index 8d24bacaa61e..26b0e2397a57 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -175,4 +175,7 @@ config HAVE_ARCH_JUMP_LABEL
175config HAVE_ARCH_MUTEX_CPU_RELAX 175config HAVE_ARCH_MUTEX_CPU_RELAX
176 bool 176 bool
177 177
178config HAVE_RCU_TABLE_FREE
179 bool
180
178source "kernel/gcov/Kconfig" 181source "kernel/gcov/Kconfig"
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 9808998cc073..e3a82775f9da 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -12,6 +12,7 @@ config ALPHA
12 select GENERIC_IRQ_PROBE 12 select GENERIC_IRQ_PROBE
13 select AUTO_IRQ_AFFINITY if SMP 13 select AUTO_IRQ_AFFINITY if SMP
14 select GENERIC_IRQ_SHOW 14 select GENERIC_IRQ_SHOW
15 select ARCH_WANT_OPTIONAL_GPIOLIB
15 help 16 help
16 The Alpha is a 64-bit general-purpose processor designed and 17 The Alpha is a 64-bit general-purpose processor designed and
17 marketed by the Digital Equipment Corporation of blessed memory, 18 marketed by the Digital Equipment Corporation of blessed memory,
@@ -51,6 +52,9 @@ config GENERIC_CALIBRATE_DELAY
51config GENERIC_CMOS_UPDATE 52config GENERIC_CMOS_UPDATE
52 def_bool y 53 def_bool y
53 54
55config GENERIC_GPIO
56 def_bool y
57
54config ZONE_DMA 58config ZONE_DMA
55 bool 59 bool
56 default y 60 default y
diff --git a/arch/alpha/include/asm/gpio.h b/arch/alpha/include/asm/gpio.h
new file mode 100644
index 000000000000..7dc6a6343c06
--- /dev/null
+++ b/arch/alpha/include/asm/gpio.h
@@ -0,0 +1,55 @@
1/*
2 * Generic GPIO API implementation for Alpha.
3 *
4 * A stright copy of that for PowerPC which was:
5 *
6 * Copyright (c) 2007-2008 MontaVista Software, Inc.
7 *
8 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#ifndef _ASM_ALPHA_GPIO_H
17#define _ASM_ALPHA_GPIO_H
18
19#include <linux/errno.h>
20#include <asm-generic/gpio.h>
21
22#ifdef CONFIG_GPIOLIB
23
24/*
25 * We don't (yet) implement inlined/rapid versions for on-chip gpios.
26 * Just call gpiolib.
27 */
28static inline int gpio_get_value(unsigned int gpio)
29{
30 return __gpio_get_value(gpio);
31}
32
33static inline void gpio_set_value(unsigned int gpio, int value)
34{
35 __gpio_set_value(gpio, value);
36}
37
38static inline int gpio_cansleep(unsigned int gpio)
39{
40 return __gpio_cansleep(gpio);
41}
42
43static inline int gpio_to_irq(unsigned int gpio)
44{
45 return __gpio_to_irq(gpio);
46}
47
48static inline int irq_to_gpio(unsigned int irq)
49{
50 return -EINVAL;
51}
52
53#endif /* CONFIG_GPIOLIB */
54
55#endif /* _ASM_ALPHA_GPIO_H */
diff --git a/arch/alpha/include/asm/smp.h b/arch/alpha/include/asm/smp.h
index 3f390e8cc0b3..c46e714aa3e0 100644
--- a/arch/alpha/include/asm/smp.h
+++ b/arch/alpha/include/asm/smp.h
@@ -39,8 +39,6 @@ struct cpuinfo_alpha {
39 39
40extern struct cpuinfo_alpha cpu_data[NR_CPUS]; 40extern struct cpuinfo_alpha cpu_data[NR_CPUS];
41 41
42#define PROC_CHANGE_PENALTY 20
43
44#define hard_smp_processor_id() __hard_smp_processor_id() 42#define hard_smp_processor_id() __hard_smp_processor_id()
45#define raw_smp_processor_id() (current_thread_info()->cpu) 43#define raw_smp_processor_id() (current_thread_info()->cpu)
46 44
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 3ec35066f1dc..838eac128409 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -121,7 +121,7 @@ common_shutdown_1(void *generic_ptr)
121 /* Wait for the secondaries to halt. */ 121 /* Wait for the secondaries to halt. */
122 set_cpu_present(boot_cpuid, false); 122 set_cpu_present(boot_cpuid, false);
123 set_cpu_possible(boot_cpuid, false); 123 set_cpu_possible(boot_cpuid, false);
124 while (cpus_weight(cpu_present_map)) 124 while (cpumask_weight(cpu_present_mask))
125 barrier(); 125 barrier();
126#endif 126#endif
127 127
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index edbddcbd5bc6..cc0fd862cf26 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -1257,7 +1257,7 @@ show_cpuinfo(struct seq_file *f, void *slot)
1257#ifdef CONFIG_SMP 1257#ifdef CONFIG_SMP
1258 seq_printf(f, "cpus active\t\t: %u\n" 1258 seq_printf(f, "cpus active\t\t: %u\n"
1259 "cpu active mask\t\t: %016lx\n", 1259 "cpu active mask\t\t: %016lx\n",
1260 num_online_cpus(), cpus_addr(cpu_possible_map)[0]); 1260 num_online_cpus(), cpumask_bits(cpu_possible_mask)[0]);
1261#endif 1261#endif
1262 1262
1263 show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape); 1263 show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape);
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 5a621c6d22ab..d739703608fc 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -451,7 +451,7 @@ setup_smp(void)
451 } 451 }
452 452
453 printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n", 453 printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
454 smp_num_probed, cpu_present_map.bits[0]); 454 smp_num_probed, cpumask_bits(cpu_present_mask)[0]);
455} 455}
456 456
457/* 457/*
@@ -629,8 +629,9 @@ smp_send_reschedule(int cpu)
629void 629void
630smp_send_stop(void) 630smp_send_stop(void)
631{ 631{
632 cpumask_t to_whom = cpu_possible_map; 632 cpumask_t to_whom;
633 cpu_clear(smp_processor_id(), to_whom); 633 cpumask_copy(&to_whom, cpu_possible_mask);
634 cpumask_clear_cpu(smp_processor_id(), &to_whom);
634#ifdef DEBUG_IPI_MSG 635#ifdef DEBUG_IPI_MSG
635 if (hard_smp_processor_id() != boot_cpu_id) 636 if (hard_smp_processor_id() != boot_cpu_id)
636 printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n"); 637 printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index 5ac00fd4cd0c..f8856829c22a 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -140,7 +140,7 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
140 140
141 for (cpu = 0; cpu < 4; cpu++) { 141 for (cpu = 0; cpu < 4; cpu++) {
142 unsigned long aff = cpu_irq_affinity[cpu]; 142 unsigned long aff = cpu_irq_affinity[cpu];
143 if (cpu_isset(cpu, affinity)) 143 if (cpumask_test_cpu(cpu, &affinity))
144 aff |= 1UL << irq; 144 aff |= 1UL << irq;
145 else 145 else
146 aff &= ~(1UL << irq); 146 aff &= ~(1UL << irq);
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index fea0e4620994..6994407e242a 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -65,10 +65,11 @@ titan_update_irq_hw(unsigned long mask)
65 register int bcpu = boot_cpuid; 65 register int bcpu = boot_cpuid;
66 66
67#ifdef CONFIG_SMP 67#ifdef CONFIG_SMP
68 cpumask_t cpm = cpu_present_map; 68 cpumask_t cpm;
69 volatile unsigned long *dim0, *dim1, *dim2, *dim3; 69 volatile unsigned long *dim0, *dim1, *dim2, *dim3;
70 unsigned long mask0, mask1, mask2, mask3, dummy; 70 unsigned long mask0, mask1, mask2, mask3, dummy;
71 71
72 cpumask_copy(&cpm, cpu_present_mask);
72 mask &= ~isa_enable; 73 mask &= ~isa_enable;
73 mask0 = mask & titan_cpu_irq_affinity[0]; 74 mask0 = mask & titan_cpu_irq_affinity[0];
74 mask1 = mask & titan_cpu_irq_affinity[1]; 75 mask1 = mask & titan_cpu_irq_affinity[1];
@@ -84,10 +85,10 @@ titan_update_irq_hw(unsigned long mask)
84 dim1 = &cchip->dim1.csr; 85 dim1 = &cchip->dim1.csr;
85 dim2 = &cchip->dim2.csr; 86 dim2 = &cchip->dim2.csr;
86 dim3 = &cchip->dim3.csr; 87 dim3 = &cchip->dim3.csr;
87 if (!cpu_isset(0, cpm)) dim0 = &dummy; 88 if (!cpumask_test_cpu(0, &cpm)) dim0 = &dummy;
88 if (!cpu_isset(1, cpm)) dim1 = &dummy; 89 if (!cpumask_test_cpu(1, &cpm)) dim1 = &dummy;
89 if (!cpu_isset(2, cpm)) dim2 = &dummy; 90 if (!cpumask_test_cpu(2, &cpm)) dim2 = &dummy;
90 if (!cpu_isset(3, cpm)) dim3 = &dummy; 91 if (!cpumask_test_cpu(3, &cpm)) dim3 = &dummy;
91 92
92 *dim0 = mask0; 93 *dim0 = mask0;
93 *dim1 = mask1; 94 *dim1 = mask1;
@@ -137,7 +138,7 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
137 int cpu; 138 int cpu;
138 139
139 for (cpu = 0; cpu < 4; cpu++) { 140 for (cpu = 0; cpu < 4; cpu++) {
140 if (cpu_isset(cpu, affinity)) 141 if (cpumask_test_cpu(cpu, &affinity))
141 titan_cpu_irq_affinity[cpu] |= 1UL << irq; 142 titan_cpu_irq_affinity[cpu] |= 1UL << irq;
142 else 143 else
143 titan_cpu_irq_affinity[cpu] &= ~(1UL << irq); 144 titan_cpu_irq_affinity[cpu] &= ~(1UL << irq);
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 86425ab53bf5..69d0c5761e2f 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -32,8 +32,6 @@
32#include <asm/console.h> 32#include <asm/console.h>
33#include <asm/tlb.h> 33#include <asm/tlb.h>
34 34
35DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
36
37extern void die_if_kernel(char *,struct pt_regs *,long); 35extern void die_if_kernel(char *,struct pt_regs *,long);
38 36
39static struct pcb_struct original_pcb; 37static struct pcb_struct original_pcb;
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
index 7b2c56d8f930..3973ae395772 100644
--- a/arch/alpha/mm/numa.c
+++ b/arch/alpha/mm/numa.c
@@ -313,6 +313,7 @@ void __init paging_init(void)
313 zones_size[ZONE_DMA] = dma_local_pfn; 313 zones_size[ZONE_DMA] = dma_local_pfn;
314 zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn; 314 zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn;
315 } 315 }
316 node_set_state(nid, N_NORMAL_MEMORY);
316 free_area_init_node(nid, zones_size, start_pfn, NULL); 317 free_area_init_node(nid, zones_size, start_pfn, NULL);
317 } 318 }
318 319
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 03d01d783e3b..81cbe40c159c 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -63,13 +63,6 @@ config DEBUG_USER
63 8 - SIGSEGV faults 63 8 - SIGSEGV faults
64 16 - SIGBUS faults 64 16 - SIGBUS faults
65 65
66config DEBUG_STACK_USAGE
67 bool "Enable stack utilization instrumentation"
68 depends on DEBUG_KERNEL
69 help
70 Enables the display of the minimum amount of free stack which each
71 task has ever had available in the sysrq-T output.
72
73# These options are only for real kernel hackers who want to get their hands dirty. 66# These options are only for real kernel hackers who want to get their hands dirty.
74config DEBUG_LL 67config DEBUG_LL
75 bool "Kernel low-level debugging functions" 68 bool "Kernel low-level debugging functions"
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index a87664f54f93..d2b514fd76f4 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -20,12 +20,6 @@
20 20
21#define raw_smp_processor_id() (current_thread_info()->cpu) 21#define raw_smp_processor_id() (current_thread_info()->cpu)
22 22
23/*
24 * at the moment, there's not a big penalty for changing CPUs
25 * (the >big< penalty is running SMP in the first place)
26 */
27#define PROC_CHANGE_PENALTY 15
28
29struct seq_file; 23struct seq_file;
30 24
31/* 25/*
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 82dfe5d0c41e..265f908c4a6e 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -41,12 +41,12 @@
41 */ 41 */
42#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7) 42#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
43#define tlb_fast_mode(tlb) 0 43#define tlb_fast_mode(tlb) 0
44#define FREE_PTE_NR 500
45#else 44#else
46#define tlb_fast_mode(tlb) 1 45#define tlb_fast_mode(tlb) 1
47#define FREE_PTE_NR 0
48#endif 46#endif
49 47
48#define MMU_GATHER_BUNDLE 8
49
50/* 50/*
51 * TLB handling. This allows us to remove pages from the page 51 * TLB handling. This allows us to remove pages from the page
52 * tables, and efficiently handle the TLB issues. 52 * tables, and efficiently handle the TLB issues.
@@ -58,7 +58,9 @@ struct mmu_gather {
58 unsigned long range_start; 58 unsigned long range_start;
59 unsigned long range_end; 59 unsigned long range_end;
60 unsigned int nr; 60 unsigned int nr;
61 struct page *pages[FREE_PTE_NR]; 61 unsigned int max;
62 struct page **pages;
63 struct page *local[MMU_GATHER_BUNDLE];
62}; 64};
63 65
64DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); 66DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -97,26 +99,37 @@ static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
97 } 99 }
98} 100}
99 101
102static inline void __tlb_alloc_page(struct mmu_gather *tlb)
103{
104 unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
105
106 if (addr) {
107 tlb->pages = (void *)addr;
108 tlb->max = PAGE_SIZE / sizeof(struct page *);
109 }
110}
111
100static inline void tlb_flush_mmu(struct mmu_gather *tlb) 112static inline void tlb_flush_mmu(struct mmu_gather *tlb)
101{ 113{
102 tlb_flush(tlb); 114 tlb_flush(tlb);
103 if (!tlb_fast_mode(tlb)) { 115 if (!tlb_fast_mode(tlb)) {
104 free_pages_and_swap_cache(tlb->pages, tlb->nr); 116 free_pages_and_swap_cache(tlb->pages, tlb->nr);
105 tlb->nr = 0; 117 tlb->nr = 0;
118 if (tlb->pages == tlb->local)
119 __tlb_alloc_page(tlb);
106 } 120 }
107} 121}
108 122
109static inline struct mmu_gather * 123static inline void
110tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) 124tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
111{ 125{
112 struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
113
114 tlb->mm = mm; 126 tlb->mm = mm;
115 tlb->fullmm = full_mm_flush; 127 tlb->fullmm = fullmm;
116 tlb->vma = NULL; 128 tlb->vma = NULL;
129 tlb->max = ARRAY_SIZE(tlb->local);
130 tlb->pages = tlb->local;
117 tlb->nr = 0; 131 tlb->nr = 0;
118 132 __tlb_alloc_page(tlb);
119 return tlb;
120} 133}
121 134
122static inline void 135static inline void
@@ -127,7 +140,8 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
127 /* keep the page table cache within bounds */ 140 /* keep the page table cache within bounds */
128 check_pgt_cache(); 141 check_pgt_cache();
129 142
130 put_cpu_var(mmu_gathers); 143 if (tlb->pages != tlb->local)
144 free_pages((unsigned long)tlb->pages, 0);
131} 145}
132 146
133/* 147/*
@@ -162,15 +176,22 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
162 tlb_flush(tlb); 176 tlb_flush(tlb);
163} 177}
164 178
165static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) 179static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
166{ 180{
167 if (tlb_fast_mode(tlb)) { 181 if (tlb_fast_mode(tlb)) {
168 free_page_and_swap_cache(page); 182 free_page_and_swap_cache(page);
169 } else { 183 return 1; /* avoid calling tlb_flush_mmu */
170 tlb->pages[tlb->nr++] = page;
171 if (tlb->nr >= FREE_PTE_NR)
172 tlb_flush_mmu(tlb);
173 } 184 }
185
186 tlb->pages[tlb->nr++] = page;
187 VM_BUG_ON(tlb->nr > tlb->max);
188 return tlb->max - tlb->nr;
189}
190
191static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
192{
193 if (!__tlb_remove_page(tlb, page))
194 tlb_flush_mmu(tlb);
174} 195}
175 196
176static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, 197static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
diff --git a/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h b/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h
new file mode 100644
index 000000000000..292d55ed2113
--- /dev/null
+++ b/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h
@@ -0,0 +1,78 @@
1/*
2 * PTP 1588 clock using the IXP46X
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#ifndef _IXP46X_TS_H_
22#define _IXP46X_TS_H_
23
24#define DEFAULT_ADDEND 0xF0000029
25#define TICKS_NS_SHIFT 4
26
27struct ixp46x_channel_ctl {
28 u32 ch_control; /* 0x40 Time Synchronization Channel Control */
29 u32 ch_event; /* 0x44 Time Synchronization Channel Event */
30 u32 tx_snap_lo; /* 0x48 Transmit Snapshot Low Register */
31 u32 tx_snap_hi; /* 0x4C Transmit Snapshot High Register */
32 u32 rx_snap_lo; /* 0x50 Receive Snapshot Low Register */
33 u32 rx_snap_hi; /* 0x54 Receive Snapshot High Register */
34 u32 src_uuid_lo; /* 0x58 Source UUID0 Low Register */
35 u32 src_uuid_hi; /* 0x5C Sequence Identifier/Source UUID0 High */
36};
37
38struct ixp46x_ts_regs {
39 u32 control; /* 0x00 Time Sync Control Register */
40 u32 event; /* 0x04 Time Sync Event Register */
41 u32 addend; /* 0x08 Time Sync Addend Register */
42 u32 accum; /* 0x0C Time Sync Accumulator Register */
43 u32 test; /* 0x10 Time Sync Test Register */
44 u32 unused; /* 0x14 */
45 u32 rsystime_lo; /* 0x18 RawSystemTime_Low Register */
46 u32 rsystime_hi; /* 0x1C RawSystemTime_High Register */
47 u32 systime_lo; /* 0x20 SystemTime_Low Register */
48 u32 systime_hi; /* 0x24 SystemTime_High Register */
49 u32 trgt_lo; /* 0x28 TargetTime_Low Register */
50 u32 trgt_hi; /* 0x2C TargetTime_High Register */
51 u32 asms_lo; /* 0x30 Auxiliary Slave Mode Snapshot Low */
52 u32 asms_hi; /* 0x34 Auxiliary Slave Mode Snapshot High */
53 u32 amms_lo; /* 0x38 Auxiliary Master Mode Snapshot Low */
54 u32 amms_hi; /* 0x3C Auxiliary Master Mode Snapshot High */
55
56 struct ixp46x_channel_ctl channel[3];
57};
58
59/* 0x00 Time Sync Control Register Bits */
60#define TSCR_AMM (1<<3)
61#define TSCR_ASM (1<<2)
62#define TSCR_TTM (1<<1)
63#define TSCR_RST (1<<0)
64
65/* 0x04 Time Sync Event Register Bits */
66#define TSER_SNM (1<<3)
67#define TSER_SNS (1<<2)
68#define TTIPEND (1<<1)
69
70/* 0x40 Time Synchronization Channel Control Register Bits */
71#define MASTER_MODE (1<<0)
72#define TIMESTAMP_ALL (1<<1)
73
74/* 0x44 Time Synchronization Channel Event Register Bits */
75#define TX_SNAPSHOT_LOCKED (1<<0)
76#define RX_SNAPSHOT_LOCKED (1<<1)
77
78#endif
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 9afd087cc29c..23244cd0a5b6 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -37,8 +37,8 @@
37#include <plat/common.h> 37#include <plat/common.h>
38#include <plat/dma.h> 38#include <plat/dma.h>
39#include <plat/gpmc.h> 39#include <plat/gpmc.h>
40#include <plat/display.h> 40#include <video/omapdss.h>
41#include <plat/panel-generic-dpi.h> 41#include <video/omap-panel-generic-dpi.h>
42 42
43#include <plat/gpmc-smc91x.h> 43#include <plat/gpmc-smc91x.h>
44 44
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 56702c5e577f..93edd7fcf451 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -36,7 +36,7 @@
36#include <plat/usb.h> 36#include <plat/usb.h>
37#include <plat/mmc.h> 37#include <plat/mmc.h>
38#include <plat/omap4-keypad.h> 38#include <plat/omap4-keypad.h>
39#include <plat/display.h> 39#include <video/omapdss.h>
40 40
41#include "mux.h" 41#include "mux.h"
42#include "hsmmc.h" 42#include "hsmmc.h"
@@ -680,6 +680,15 @@ static struct omap_dss_device sdp4430_hdmi_device = {
680 .name = "hdmi", 680 .name = "hdmi",
681 .driver_name = "hdmi_panel", 681 .driver_name = "hdmi_panel",
682 .type = OMAP_DISPLAY_TYPE_HDMI, 682 .type = OMAP_DISPLAY_TYPE_HDMI,
683 .clocks = {
684 .dispc = {
685 .dispc_fclk_src = OMAP_DSS_CLK_SRC_FCK,
686 },
687 .hdmi = {
688 .regn = 15,
689 .regm2 = 1,
690 },
691 },
683 .platform_enable = sdp4430_panel_enable_hdmi, 692 .platform_enable = sdp4430_panel_enable_hdmi,
684 .platform_disable = sdp4430_panel_disable_hdmi, 693 .platform_disable = sdp4430_panel_disable_hdmi,
685 .channel = OMAP_DSS_CHANNEL_DIGIT, 694 .channel = OMAP_DSS_CHANNEL_DIGIT,
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index ce7d5e6e4150..ff8c59be36e5 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -34,8 +34,8 @@
34#include <plat/board.h> 34#include <plat/board.h>
35#include <plat/common.h> 35#include <plat/common.h>
36#include <plat/usb.h> 36#include <plat/usb.h>
37#include <plat/display.h> 37#include <video/omapdss.h>
38#include <plat/panel-generic-dpi.h> 38#include <video/omap-panel-generic-dpi.h>
39 39
40#include "mux.h" 40#include "mux.h"
41#include "control.h" 41#include "control.h"
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index 02a12b41c0ff..9340f6a06f4a 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -45,8 +45,8 @@
45#include <plat/nand.h> 45#include <plat/nand.h>
46#include <plat/gpmc.h> 46#include <plat/gpmc.h>
47#include <plat/usb.h> 47#include <plat/usb.h>
48#include <plat/display.h> 48#include <video/omapdss.h>
49#include <plat/panel-generic-dpi.h> 49#include <video/omap-panel-generic-dpi.h>
50#include <plat/mcspi.h> 50#include <plat/mcspi.h>
51 51
52#include <mach/hardware.h> 52#include <mach/hardware.h>
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 65f9fde2c567..1d1b56a29fb1 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -45,8 +45,8 @@
45#include <plat/gpmc.h> 45#include <plat/gpmc.h>
46#include <plat/nand.h> 46#include <plat/nand.h>
47#include <plat/usb.h> 47#include <plat/usb.h>
48#include <plat/display.h> 48#include <video/omapdss.h>
49#include <plat/panel-generic-dpi.h> 49#include <video/omap-panel-generic-dpi.h>
50 50
51#include <plat/mcspi.h> 51#include <plat/mcspi.h>
52#include <linux/input/matrix_keypad.h> 52#include <linux/input/matrix_keypad.h>
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 34cf982b9679..3da64d361651 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -31,8 +31,8 @@
31#include <plat/common.h> 31#include <plat/common.h>
32#include <plat/gpmc.h> 32#include <plat/gpmc.h>
33#include <plat/usb.h> 33#include <plat/usb.h>
34#include <plat/display.h> 34#include <video/omapdss.h>
35#include <plat/panel-generic-dpi.h> 35#include <video/omap-panel-generic-dpi.h>
36#include <plat/onenand.h> 36#include <plat/onenand.h>
37 37
38#include "mux.h" 38#include "mux.h"
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 33007fd4a083..97750d483a70 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -41,8 +41,8 @@
41 41
42#include <plat/board.h> 42#include <plat/board.h>
43#include <plat/common.h> 43#include <plat/common.h>
44#include <plat/display.h> 44#include <video/omapdss.h>
45#include <plat/panel-generic-dpi.h> 45#include <video/omap-panel-generic-dpi.h>
46#include <plat/gpmc.h> 46#include <plat/gpmc.h>
47#include <plat/nand.h> 47#include <plat/nand.h>
48#include <plat/usb.h> 48#include <plat/usb.h>
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index 5a1a916e5cc8..7f94cccdb076 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -44,8 +44,8 @@
44#include <plat/usb.h> 44#include <plat/usb.h>
45#include <plat/common.h> 45#include <plat/common.h>
46#include <plat/mcspi.h> 46#include <plat/mcspi.h>
47#include <plat/display.h> 47#include <video/omapdss.h>
48#include <plat/panel-generic-dpi.h> 48#include <video/omap-panel-generic-dpi.h>
49 49
50#include "mux.h" 50#include "mux.h"
51#include "sdram-micron-mt46h32m32lf-6.h" 51#include "sdram-micron-mt46h32m32lf-6.h"
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 07dba888f450..1db15492d82b 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -46,7 +46,7 @@
46#include <mach/hardware.h> 46#include <mach/hardware.h>
47#include <plat/mcspi.h> 47#include <plat/mcspi.h>
48#include <plat/usb.h> 48#include <plat/usb.h>
49#include <plat/display.h> 49#include <video/omapdss.h>
50#include <plat/nand.h> 50#include <plat/nand.h>
51 51
52#include "mux.h" 52#include "mux.h"
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index a6e0b9161c99..a72c90a08c8a 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -39,8 +39,8 @@
39#include <plat/gpmc.h> 39#include <plat/gpmc.h>
40#include <plat/nand.h> 40#include <plat/nand.h>
41#include <plat/usb.h> 41#include <plat/usb.h>
42#include <plat/display.h> 42#include <video/omapdss.h>
43#include <plat/panel-generic-dpi.h> 43#include <video/omap-panel-generic-dpi.h>
44 44
45#include <plat/mcspi.h> 45#include <plat/mcspi.h>
46#include <linux/input/matrix_keypad.h> 46#include <linux/input/matrix_keypad.h>
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index f3a7b1011914..e4973ac77cbc 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -34,13 +34,13 @@
34#include <asm/mach-types.h> 34#include <asm/mach-types.h>
35#include <asm/mach/arch.h> 35#include <asm/mach/arch.h>
36#include <asm/mach/map.h> 36#include <asm/mach/map.h>
37#include <plat/display.h> 37#include <video/omapdss.h>
38 38
39#include <plat/board.h> 39#include <plat/board.h>
40#include <plat/common.h> 40#include <plat/common.h>
41#include <plat/usb.h> 41#include <plat/usb.h>
42#include <plat/mmc.h> 42#include <plat/mmc.h>
43#include <plat/panel-generic-dpi.h> 43#include <video/omap-panel-generic-dpi.h>
44#include "timer-gp.h" 44#include "timer-gp.h"
45 45
46#include "hsmmc.h" 46#include "hsmmc.h"
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 59ca33326b8c..9d192ff3b9ac 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -43,8 +43,8 @@
43 43
44#include <plat/board.h> 44#include <plat/board.h>
45#include <plat/common.h> 45#include <plat/common.h>
46#include <plat/display.h> 46#include <video/omapdss.h>
47#include <plat/panel-generic-dpi.h> 47#include <video/omap-panel-generic-dpi.h>
48#include <mach/gpio.h> 48#include <mach/gpio.h>
49#include <plat/gpmc.h> 49#include <plat/gpmc.h>
50#include <mach/hardware.h> 50#include <mach/hardware.h>
diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c
index 89a66db8b77d..2df10b6a5940 100644
--- a/arch/arm/mach-omap2/board-rx51-video.c
+++ b/arch/arm/mach-omap2/board-rx51-video.c
@@ -15,7 +15,7 @@
15#include <linux/spi/spi.h> 15#include <linux/spi/spi.h>
16#include <linux/mm.h> 16#include <linux/mm.h>
17#include <asm/mach-types.h> 17#include <asm/mach-types.h>
18#include <plat/display.h> 18#include <video/omapdss.h>
19#include <plat/vram.h> 19#include <plat/vram.h>
20#include <plat/mcspi.h> 20#include <plat/mcspi.h>
21 21
diff --git a/arch/arm/mach-omap2/board-zoom-display.c b/arch/arm/mach-omap2/board-zoom-display.c
index 37b84c2b850f..60e8645db59d 100644
--- a/arch/arm/mach-omap2/board-zoom-display.c
+++ b/arch/arm/mach-omap2/board-zoom-display.c
@@ -15,7 +15,7 @@
15#include <linux/i2c/twl.h> 15#include <linux/i2c/twl.h>
16#include <linux/spi/spi.h> 16#include <linux/spi/spi.h>
17#include <plat/mcspi.h> 17#include <plat/mcspi.h>
18#include <plat/display.h> 18#include <video/omapdss.h>
19 19
20#define LCD_PANEL_RESET_GPIO_PROD 96 20#define LCD_PANEL_RESET_GPIO_PROD 96
21#define LCD_PANEL_RESET_GPIO_PILOT 55 21#define LCD_PANEL_RESET_GPIO_PILOT 55
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 256d23fb79ab..543fcb8b518c 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -22,7 +22,7 @@
22#include <linux/clk.h> 22#include <linux/clk.h>
23#include <linux/err.h> 23#include <linux/err.h>
24 24
25#include <plat/display.h> 25#include <video/omapdss.h>
26#include <plat/omap_hwmod.h> 26#include <plat/omap_hwmod.h>
27#include <plat/omap_device.h> 27#include <plat/omap_device.h>
28 28
@@ -56,37 +56,58 @@ static bool opt_clock_available(const char *clk_role)
56 return false; 56 return false;
57} 57}
58 58
59struct omap_dss_hwmod_data {
60 const char *oh_name;
61 const char *dev_name;
62 const int id;
63};
64
65static const struct omap_dss_hwmod_data omap2_dss_hwmod_data[] __initdata = {
66 { "dss_core", "omapdss_dss", -1 },
67 { "dss_dispc", "omapdss_dispc", -1 },
68 { "dss_rfbi", "omapdss_rfbi", -1 },
69 { "dss_venc", "omapdss_venc", -1 },
70};
71
72static const struct omap_dss_hwmod_data omap3_dss_hwmod_data[] __initdata = {
73 { "dss_core", "omapdss_dss", -1 },
74 { "dss_dispc", "omapdss_dispc", -1 },
75 { "dss_rfbi", "omapdss_rfbi", -1 },
76 { "dss_venc", "omapdss_venc", -1 },
77 { "dss_dsi1", "omapdss_dsi1", -1 },
78};
79
80static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initdata = {
81 { "dss_core", "omapdss_dss", -1 },
82 { "dss_dispc", "omapdss_dispc", -1 },
83 { "dss_rfbi", "omapdss_rfbi", -1 },
84 { "dss_venc", "omapdss_venc", -1 },
85 { "dss_dsi1", "omapdss_dsi1", -1 },
86 { "dss_dsi2", "omapdss_dsi2", -1 },
87 { "dss_hdmi", "omapdss_hdmi", -1 },
88};
89
59int __init omap_display_init(struct omap_dss_board_info *board_data) 90int __init omap_display_init(struct omap_dss_board_info *board_data)
60{ 91{
61 int r = 0; 92 int r = 0;
62 struct omap_hwmod *oh; 93 struct omap_hwmod *oh;
63 struct omap_device *od; 94 struct omap_device *od;
64 int i; 95 int i, oh_count;
65 struct omap_display_platform_data pdata; 96 struct omap_display_platform_data pdata;
66 97 const struct omap_dss_hwmod_data *curr_dss_hwmod;
67 /*
68 * omap: valid DSS hwmod names
69 * omap2,3,4: dss_core, dss_dispc, dss_rfbi, dss_venc
70 * omap3,4: dss_dsi1
71 * omap4: dss_dsi2, dss_hdmi
72 */
73 char *oh_name[] = { "dss_core", "dss_dispc", "dss_rfbi", "dss_venc",
74 "dss_dsi1", "dss_dsi2", "dss_hdmi" };
75 char *dev_name[] = { "omapdss_dss", "omapdss_dispc", "omapdss_rfbi",
76 "omapdss_venc", "omapdss_dsi1", "omapdss_dsi2",
77 "omapdss_hdmi" };
78 int oh_count;
79 98
80 memset(&pdata, 0, sizeof(pdata)); 99 memset(&pdata, 0, sizeof(pdata));
81 100
82 if (cpu_is_omap24xx()) 101 if (cpu_is_omap24xx()) {
83 oh_count = ARRAY_SIZE(oh_name) - 3; 102 curr_dss_hwmod = omap2_dss_hwmod_data;
84 /* last 3 hwmod dev in oh_name are not available for omap2 */ 103 oh_count = ARRAY_SIZE(omap2_dss_hwmod_data);
85 else if (cpu_is_omap44xx()) 104 } else if (cpu_is_omap34xx()) {
86 oh_count = ARRAY_SIZE(oh_name); 105 curr_dss_hwmod = omap3_dss_hwmod_data;
87 else 106 oh_count = ARRAY_SIZE(omap3_dss_hwmod_data);
88 oh_count = ARRAY_SIZE(oh_name) - 2; 107 } else {
89 /* last 2 hwmod dev in oh_name are not available for omap3 */ 108 curr_dss_hwmod = omap4_dss_hwmod_data;
109 oh_count = ARRAY_SIZE(omap4_dss_hwmod_data);
110 }
90 111
91 /* opt_clks are always associated with dss hwmod */ 112 /* opt_clks are always associated with dss hwmod */
92 oh_core = omap_hwmod_lookup("dss_core"); 113 oh_core = omap_hwmod_lookup("dss_core");
@@ -100,19 +121,21 @@ int __init omap_display_init(struct omap_dss_board_info *board_data)
100 pdata.opt_clock_available = opt_clock_available; 121 pdata.opt_clock_available = opt_clock_available;
101 122
102 for (i = 0; i < oh_count; i++) { 123 for (i = 0; i < oh_count; i++) {
103 oh = omap_hwmod_lookup(oh_name[i]); 124 oh = omap_hwmod_lookup(curr_dss_hwmod[i].oh_name);
104 if (!oh) { 125 if (!oh) {
105 pr_err("Could not look up %s\n", oh_name[i]); 126 pr_err("Could not look up %s\n",
127 curr_dss_hwmod[i].oh_name);
106 return -ENODEV; 128 return -ENODEV;
107 } 129 }
108 130
109 od = omap_device_build(dev_name[i], -1, oh, &pdata, 131 od = omap_device_build(curr_dss_hwmod[i].dev_name,
132 curr_dss_hwmod[i].id, oh, &pdata,
110 sizeof(struct omap_display_platform_data), 133 sizeof(struct omap_display_platform_data),
111 omap_dss_latency, 134 omap_dss_latency,
112 ARRAY_SIZE(omap_dss_latency), 0); 135 ARRAY_SIZE(omap_dss_latency), 0);
113 136
114 if (WARN((IS_ERR(od)), "Could not build omap_device for %s\n", 137 if (WARN((IS_ERR(od)), "Could not build omap_device for %s\n",
115 oh_name[i])) 138 curr_dss_hwmod[i].oh_name))
116 return -ENODEV; 139 return -ENODEV;
117 } 140 }
118 omap_display_device.dev.platform_data = board_data; 141 omap_display_device.dev.platform_data = board_data;
diff --git a/arch/arm/mach-omap2/include/mach/board-zoom.h b/arch/arm/mach-omap2/include/mach/board-zoom.h
index d20bd9c1a106..775fdc3b000b 100644
--- a/arch/arm/mach-omap2/include/mach/board-zoom.h
+++ b/arch/arm/mach-omap2/include/mach/board-zoom.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Defines for zoom boards 2 * Defines for zoom boards
3 */ 3 */
4#include <plat/display.h> 4#include <video/omapdss.h>
5 5
6#define ZOOM_NAND_CS 0 6#define ZOOM_NAND_CS 0
7 7
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index e2507f66f9d5..612b27000c3e 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -30,6 +30,11 @@ obj-$(CONFIG_ARCH_SH7377) += entry-intc.o
30obj-$(CONFIG_ARCH_SH7372) += entry-intc.o 30obj-$(CONFIG_ARCH_SH7372) += entry-intc.o
31obj-$(CONFIG_ARCH_SH73A0) += entry-gic.o 31obj-$(CONFIG_ARCH_SH73A0) += entry-gic.o
32 32
33# PM objects
34obj-$(CONFIG_SUSPEND) += suspend.o
35obj-$(CONFIG_CPU_IDLE) += cpuidle.o
36obj-$(CONFIG_ARCH_SH7372) += pm-sh7372.o sleep-sh7372.o
37
33# Board objects 38# Board objects
34obj-$(CONFIG_MACH_G3EVM) += board-g3evm.o 39obj-$(CONFIG_MACH_G3EVM) += board-g3evm.o
35obj-$(CONFIG_MACH_G4EVM) += board-g4evm.o 40obj-$(CONFIG_MACH_G4EVM) += board-g4evm.o
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index 3e6f0aab460b..c95258c274c1 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -34,6 +34,8 @@
34#include <linux/input/sh_keysc.h> 34#include <linux/input/sh_keysc.h>
35#include <linux/mmc/host.h> 35#include <linux/mmc/host.h>
36#include <linux/mmc/sh_mmcif.h> 36#include <linux/mmc/sh_mmcif.h>
37#include <linux/mmc/sh_mobile_sdhi.h>
38#include <linux/mfd/tmio.h>
37#include <linux/sh_clk.h> 39#include <linux/sh_clk.h>
38#include <video/sh_mobile_lcdc.h> 40#include <video/sh_mobile_lcdc.h>
39#include <video/sh_mipi_dsi.h> 41#include <video/sh_mipi_dsi.h>
@@ -156,10 +158,19 @@ static struct resource sh_mmcif_resources[] = {
156 }, 158 },
157}; 159};
158 160
161static struct sh_mmcif_dma sh_mmcif_dma = {
162 .chan_priv_rx = {
163 .slave_id = SHDMA_SLAVE_MMCIF_RX,
164 },
165 .chan_priv_tx = {
166 .slave_id = SHDMA_SLAVE_MMCIF_TX,
167 },
168};
159static struct sh_mmcif_plat_data sh_mmcif_platdata = { 169static struct sh_mmcif_plat_data sh_mmcif_platdata = {
160 .sup_pclk = 0, 170 .sup_pclk = 0,
161 .ocr = MMC_VDD_165_195, 171 .ocr = MMC_VDD_165_195,
162 .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE, 172 .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
173 .dma = &sh_mmcif_dma,
163}; 174};
164 175
165static struct platform_device mmc_device = { 176static struct platform_device mmc_device = {
@@ -296,11 +307,13 @@ static struct platform_device lcdc0_device = {
296/* MIPI-DSI */ 307/* MIPI-DSI */
297static struct resource mipidsi0_resources[] = { 308static struct resource mipidsi0_resources[] = {
298 [0] = { 309 [0] = {
310 .name = "DSI0",
299 .start = 0xfeab0000, 311 .start = 0xfeab0000,
300 .end = 0xfeab3fff, 312 .end = 0xfeab3fff,
301 .flags = IORESOURCE_MEM, 313 .flags = IORESOURCE_MEM,
302 }, 314 },
303 [1] = { 315 [1] = {
316 .name = "DSI0",
304 .start = 0xfeab4000, 317 .start = 0xfeab4000,
305 .end = 0xfeab7fff, 318 .end = 0xfeab7fff,
306 .flags = IORESOURCE_MEM, 319 .flags = IORESOURCE_MEM,
@@ -325,6 +338,89 @@ static struct platform_device mipidsi0_device = {
325 }, 338 },
326}; 339};
327 340
341static struct sh_mobile_sdhi_info sdhi0_info = {
342 .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
343 .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
344 .tmio_caps = MMC_CAP_SD_HIGHSPEED,
345 .tmio_ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29,
346};
347
348static struct resource sdhi0_resources[] = {
349 [0] = {
350 .name = "SDHI0",
351 .start = 0xee100000,
352 .end = 0xee1000ff,
353 .flags = IORESOURCE_MEM,
354 },
355 [1] = {
356 .start = gic_spi(83),
357 .flags = IORESOURCE_IRQ,
358 },
359 [2] = {
360 .start = gic_spi(84),
361 .flags = IORESOURCE_IRQ,
362 },
363 [3] = {
364 .start = gic_spi(85),
365 .flags = IORESOURCE_IRQ,
366 },
367};
368
369static struct platform_device sdhi0_device = {
370 .name = "sh_mobile_sdhi",
371 .id = 0,
372 .num_resources = ARRAY_SIZE(sdhi0_resources),
373 .resource = sdhi0_resources,
374 .dev = {
375 .platform_data = &sdhi0_info,
376 },
377};
378
379void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state)
380{
381 gpio_set_value(GPIO_PORT114, state);
382}
383
384static struct sh_mobile_sdhi_info sh_sdhi1_platdata = {
385 .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
386 .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
387 .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE,
388 .tmio_caps = MMC_CAP_NONREMOVABLE,
389 .tmio_ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
390 .set_pwr = ag5evm_sdhi1_set_pwr,
391};
392
393static struct resource sdhi1_resources[] = {
394 [0] = {
395 .name = "SDHI1",
396 .start = 0xee120000,
397 .end = 0xee1200ff,
398 .flags = IORESOURCE_MEM,
399 },
400 [1] = {
401 .start = gic_spi(87),
402 .flags = IORESOURCE_IRQ,
403 },
404 [2] = {
405 .start = gic_spi(88),
406 .flags = IORESOURCE_IRQ,
407 },
408 [3] = {
409 .start = gic_spi(89),
410 .flags = IORESOURCE_IRQ,
411 },
412};
413
414static struct platform_device sdhi1_device = {
415 .name = "sh_mobile_sdhi",
416 .id = 1,
417 .dev = {
418 .platform_data = &sh_sdhi1_platdata,
419 },
420 .num_resources = ARRAY_SIZE(sdhi1_resources),
421 .resource = sdhi1_resources,
422};
423
328static struct platform_device *ag5evm_devices[] __initdata = { 424static struct platform_device *ag5evm_devices[] __initdata = {
329 &eth_device, 425 &eth_device,
330 &keysc_device, 426 &keysc_device,
@@ -333,6 +429,8 @@ static struct platform_device *ag5evm_devices[] __initdata = {
333 &irda_device, 429 &irda_device,
334 &lcdc0_device, 430 &lcdc0_device,
335 &mipidsi0_device, 431 &mipidsi0_device,
432 &sdhi0_device,
433 &sdhi1_device,
336}; 434};
337 435
338static struct map_desc ag5evm_io_desc[] __initdata = { 436static struct map_desc ag5evm_io_desc[] __initdata = {
@@ -454,6 +552,26 @@ static void __init ag5evm_init(void)
454 /* MIPI-DSI clock setup */ 552 /* MIPI-DSI clock setup */
455 __raw_writel(0x2a809010, DSI0PHYCR); 553 __raw_writel(0x2a809010, DSI0PHYCR);
456 554
555 /* enable SDHI0 on CN15 [SD I/F] */
556 gpio_request(GPIO_FN_SDHICD0, NULL);
557 gpio_request(GPIO_FN_SDHIWP0, NULL);
558 gpio_request(GPIO_FN_SDHICMD0, NULL);
559 gpio_request(GPIO_FN_SDHICLK0, NULL);
560 gpio_request(GPIO_FN_SDHID0_3, NULL);
561 gpio_request(GPIO_FN_SDHID0_2, NULL);
562 gpio_request(GPIO_FN_SDHID0_1, NULL);
563 gpio_request(GPIO_FN_SDHID0_0, NULL);
564
565 /* enable SDHI1 on CN4 [WLAN I/F] */
566 gpio_request(GPIO_FN_SDHICLK1, NULL);
567 gpio_request(GPIO_FN_SDHICMD1_PU, NULL);
568 gpio_request(GPIO_FN_SDHID1_3_PU, NULL);
569 gpio_request(GPIO_FN_SDHID1_2_PU, NULL);
570 gpio_request(GPIO_FN_SDHID1_1_PU, NULL);
571 gpio_request(GPIO_FN_SDHID1_0_PU, NULL);
572 gpio_request(GPIO_PORT114, "sdhi1_power");
573 gpio_direction_output(GPIO_PORT114, 0);
574
457#ifdef CONFIG_CACHE_L2X0 575#ifdef CONFIG_CACHE_L2X0
458 /* Shared attribute override enable, 64K*8way */ 576 /* Shared attribute override enable, 64K*8way */
459 l2x0_init(__io(0xf0100000), 0x00460000, 0xc2000fff); 577 l2x0_init(__io(0xf0100000), 0x00460000, 0xc2000fff);
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 1e35fa976d64..08acb6ec8139 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -316,8 +316,16 @@ static struct resource sdhi0_resources[] = {
316 .flags = IORESOURCE_MEM, 316 .flags = IORESOURCE_MEM,
317 }, 317 },
318 [1] = { 318 [1] = {
319 .start = evt2irq(0x0e00) /* SDHI0 */, 319 .start = evt2irq(0x0e00) /* SDHI0_SDHI0I0 */,
320 .flags = IORESOURCE_IRQ, 320 .flags = IORESOURCE_IRQ,
321 },
322 [2] = {
323 .start = evt2irq(0x0e20) /* SDHI0_SDHI0I1 */,
324 .flags = IORESOURCE_IRQ,
325 },
326 [3] = {
327 .start = evt2irq(0x0e40) /* SDHI0_SDHI0I2 */,
328 .flags = IORESOURCE_IRQ,
321 }, 329 },
322}; 330};
323 331
@@ -349,8 +357,16 @@ static struct resource sdhi1_resources[] = {
349 .flags = IORESOURCE_MEM, 357 .flags = IORESOURCE_MEM,
350 }, 358 },
351 [1] = { 359 [1] = {
352 .start = evt2irq(0x0e80), 360 .start = evt2irq(0x0e80), /* SDHI1_SDHI1I0 */
353 .flags = IORESOURCE_IRQ, 361 .flags = IORESOURCE_IRQ,
362 },
363 [2] = {
364 .start = evt2irq(0x0ea0), /* SDHI1_SDHI1I1 */
365 .flags = IORESOURCE_IRQ,
366 },
367 [3] = {
368 .start = evt2irq(0x0ec0), /* SDHI1_SDHI1I2 */
369 .flags = IORESOURCE_IRQ,
354 }, 370 },
355}; 371};
356 372
@@ -980,11 +996,6 @@ static void __init hdmi_init_pm_clock(void)
980 goto out; 996 goto out;
981 } 997 }
982 998
983 ret = clk_enable(&sh7372_pllc2_clk);
984 if (ret < 0) {
985 pr_err("Cannot enable pllc2 clock\n");
986 goto out;
987 }
988 pr_debug("PLLC2 set frequency %lu\n", rate); 999 pr_debug("PLLC2 set frequency %lu\n", rate);
989 1000
990 ret = clk_set_parent(hdmi_ick, &sh7372_pllc2_clk); 1001 ret = clk_set_parent(hdmi_ick, &sh7372_pllc2_clk);
@@ -1343,6 +1354,7 @@ static void __init ap4evb_init(void)
1343 1354
1344 hdmi_init_pm_clock(); 1355 hdmi_init_pm_clock();
1345 fsi_init_pm_clock(); 1356 fsi_init_pm_clock();
1357 sh7372_pm_init();
1346} 1358}
1347 1359
1348static void __init ap4evb_timer_init(void) 1360static void __init ap4evb_timer_init(void)
diff --git a/arch/arm/mach-shmobile/board-g4evm.c b/arch/arm/mach-shmobile/board-g4evm.c
index c87a7b7c5832..8e3c5559f27f 100644
--- a/arch/arm/mach-shmobile/board-g4evm.c
+++ b/arch/arm/mach-shmobile/board-g4evm.c
@@ -205,7 +205,7 @@ static struct resource sdhi0_resources[] = {
205 [0] = { 205 [0] = {
206 .name = "SDHI0", 206 .name = "SDHI0",
207 .start = 0xe6d50000, 207 .start = 0xe6d50000,
208 .end = 0xe6d50nff, 208 .end = 0xe6d500ff,
209 .flags = IORESOURCE_MEM, 209 .flags = IORESOURCE_MEM,
210 }, 210 },
211 [1] = { 211 [1] = {
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 7da2ca24229d..448ddbe43335 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -43,6 +43,7 @@
43#include <linux/sh_intc.h> 43#include <linux/sh_intc.h>
44#include <linux/tca6416_keypad.h> 44#include <linux/tca6416_keypad.h>
45#include <linux/usb/r8a66597.h> 45#include <linux/usb/r8a66597.h>
46#include <linux/usb/renesas_usbhs.h>
46 47
47#include <video/sh_mobile_hdmi.h> 48#include <video/sh_mobile_hdmi.h>
48#include <video/sh_mobile_lcdc.h> 49#include <video/sh_mobile_lcdc.h>
@@ -143,7 +144,30 @@
143 * open | external VBUS | Function 144 * open | external VBUS | Function
144 * 145 *
145 * *1 146 * *1
146 * CN31 is used as Host in Linux. 147 * CN31 is used as
148 * CONFIG_USB_R8A66597_HCD Host
149 * CONFIG_USB_RENESAS_USBHS Function
150 *
151 * CAUTION
152 *
153 * renesas_usbhs driver can use external interrupt mode
154 * (which come from USB-PHY) or autonomy mode (it use own interrupt)
155 * for detecting connection/disconnection when Function.
156 * USB will be power OFF while it has been disconnecting
157 * if external interrupt mode, and it is always power ON if autonomy mode,
158 *
159 * mackerel can not use external interrupt (IRQ7-PORT167) mode on "USB0",
160 * because Touchscreen is using IRQ7-PORT40.
161 * It is impossible to use IRQ7 demux on this board.
162 *
163 * We can use external interrupt mode USB-Function on "USB1".
164 * USB1 can become Host by r8a66597, and become Function by renesas_usbhs.
165 * But don't select both drivers in same time.
166 * These uses same IRQ number for request_irq(), and aren't supporting
167 * IRQF_SHARD / IORESOURCE_IRQ_SHAREABLE.
168 *
169 * Actually these are old/new version of USB driver.
170 * This mean its register will be broken if it supports SHARD IRQ,
147 */ 171 */
148 172
149/* 173/*
@@ -185,6 +209,7 @@
185 * FIXME !! 209 * FIXME !!
186 * 210 *
187 * gpio_no_direction 211 * gpio_no_direction
212 * gpio_pull_down
188 * are quick_hack. 213 * are quick_hack.
189 * 214 *
190 * current gpio frame work doesn't have 215 * current gpio frame work doesn't have
@@ -196,6 +221,16 @@ static void __init gpio_no_direction(u32 addr)
196 __raw_writeb(0x00, addr); 221 __raw_writeb(0x00, addr);
197} 222}
198 223
224static void __init gpio_pull_down(u32 addr)
225{
226 u8 data = __raw_readb(addr);
227
228 data &= 0x0F;
229 data |= 0xA0;
230
231 __raw_writeb(data, addr);
232}
233
199/* MTD */ 234/* MTD */
200static struct mtd_partition nor_flash_partitions[] = { 235static struct mtd_partition nor_flash_partitions[] = {
201 { 236 {
@@ -458,12 +493,6 @@ static void __init hdmi_init_pm_clock(void)
458 goto out; 493 goto out;
459 } 494 }
460 495
461 ret = clk_enable(&sh7372_pllc2_clk);
462 if (ret < 0) {
463 pr_err("Cannot enable pllc2 clock\n");
464 goto out;
465 }
466
467 pr_debug("PLLC2 set frequency %lu\n", rate); 496 pr_debug("PLLC2 set frequency %lu\n", rate);
468 497
469 ret = clk_set_parent(hdmi_ick, &sh7372_pllc2_clk); 498 ret = clk_set_parent(hdmi_ick, &sh7372_pllc2_clk);
@@ -515,6 +544,157 @@ static struct platform_device usb1_host_device = {
515 .resource = usb1_host_resources, 544 .resource = usb1_host_resources,
516}; 545};
517 546
547/* USB1 (Function) */
548#define USB_PHY_MODE (1 << 4)
549#define USB_PHY_INT_EN ((1 << 3) | (1 << 2))
550#define USB_PHY_ON (1 << 1)
551#define USB_PHY_OFF (1 << 0)
552#define USB_PHY_INT_CLR (USB_PHY_ON | USB_PHY_OFF)
553
554struct usbhs_private {
555 unsigned int irq;
556 unsigned int usbphyaddr;
557 unsigned int usbcrcaddr;
558 struct renesas_usbhs_platform_info info;
559};
560
561#define usbhs_get_priv(pdev) \
562 container_of(renesas_usbhs_get_info(pdev), \
563 struct usbhs_private, info)
564
565#define usbhs_is_connected(priv) \
566 (!((1 << 7) & __raw_readw(priv->usbcrcaddr)))
567
568static int usbhs1_get_id(struct platform_device *pdev)
569{
570 return USBHS_GADGET;
571}
572
573static int usbhs1_get_vbus(struct platform_device *pdev)
574{
575 return usbhs_is_connected(usbhs_get_priv(pdev));
576}
577
578static irqreturn_t usbhs1_interrupt(int irq, void *data)
579{
580 struct platform_device *pdev = data;
581 struct usbhs_private *priv = usbhs_get_priv(pdev);
582
583 dev_dbg(&pdev->dev, "%s\n", __func__);
584
585 renesas_usbhs_call_notify_hotplug(pdev);
586
587 /* clear status */
588 __raw_writew(__raw_readw(priv->usbphyaddr) | USB_PHY_INT_CLR,
589 priv->usbphyaddr);
590
591 return IRQ_HANDLED;
592}
593
594static int usbhs1_hardware_init(struct platform_device *pdev)
595{
596 struct usbhs_private *priv = usbhs_get_priv(pdev);
597 int ret;
598
599 irq_set_irq_type(priv->irq, IRQ_TYPE_LEVEL_HIGH);
600
601 /* clear interrupt status */
602 __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr);
603
604 ret = request_irq(priv->irq, usbhs1_interrupt, 0,
605 dev_name(&pdev->dev), pdev);
606 if (ret) {
607 dev_err(&pdev->dev, "request_irq err\n");
608 return ret;
609 }
610
611 /* enable USB phy interrupt */
612 __raw_writew(USB_PHY_MODE | USB_PHY_INT_EN, priv->usbphyaddr);
613
614 return 0;
615}
616
617static void usbhs1_hardware_exit(struct platform_device *pdev)
618{
619 struct usbhs_private *priv = usbhs_get_priv(pdev);
620
621 /* clear interrupt status */
622 __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr);
623
624 free_irq(priv->irq, pdev);
625}
626
627static void usbhs1_phy_reset(struct platform_device *pdev)
628{
629 struct usbhs_private *priv = usbhs_get_priv(pdev);
630
631 /* init phy */
632 __raw_writew(0x8a0a, priv->usbcrcaddr);
633}
634
635static u32 usbhs1_pipe_cfg[] = {
636 USB_ENDPOINT_XFER_CONTROL,
637 USB_ENDPOINT_XFER_ISOC,
638 USB_ENDPOINT_XFER_ISOC,
639 USB_ENDPOINT_XFER_BULK,
640 USB_ENDPOINT_XFER_BULK,
641 USB_ENDPOINT_XFER_BULK,
642 USB_ENDPOINT_XFER_INT,
643 USB_ENDPOINT_XFER_INT,
644 USB_ENDPOINT_XFER_INT,
645 USB_ENDPOINT_XFER_BULK,
646 USB_ENDPOINT_XFER_BULK,
647 USB_ENDPOINT_XFER_BULK,
648 USB_ENDPOINT_XFER_BULK,
649 USB_ENDPOINT_XFER_BULK,
650 USB_ENDPOINT_XFER_BULK,
651 USB_ENDPOINT_XFER_BULK,
652};
653
654static struct usbhs_private usbhs1_private = {
655 .irq = evt2irq(0x0300), /* IRQ8 */
656 .usbphyaddr = 0xE60581E2, /* USBPHY1INTAP */
657 .usbcrcaddr = 0xE6058130, /* USBCR4 */
658 .info = {
659 .platform_callback = {
660 .hardware_init = usbhs1_hardware_init,
661 .hardware_exit = usbhs1_hardware_exit,
662 .phy_reset = usbhs1_phy_reset,
663 .get_id = usbhs1_get_id,
664 .get_vbus = usbhs1_get_vbus,
665 },
666 .driver_param = {
667 .buswait_bwait = 4,
668 .pipe_type = usbhs1_pipe_cfg,
669 .pipe_size = ARRAY_SIZE(usbhs1_pipe_cfg),
670 },
671 },
672};
673
674static struct resource usbhs1_resources[] = {
675 [0] = {
676 .name = "USBHS",
677 .start = 0xE68B0000,
678 .end = 0xE68B00E6 - 1,
679 .flags = IORESOURCE_MEM,
680 },
681 [1] = {
682 .start = evt2irq(0x1ce0) /* USB1_USB1I0 */,
683 .flags = IORESOURCE_IRQ,
684 },
685};
686
687static struct platform_device usbhs1_device = {
688 .name = "renesas_usbhs",
689 .id = 1,
690 .dev = {
691 .platform_data = &usbhs1_private.info,
692 },
693 .num_resources = ARRAY_SIZE(usbhs1_resources),
694 .resource = usbhs1_resources,
695};
696
697
518/* LED */ 698/* LED */
519static struct gpio_led mackerel_leds[] = { 699static struct gpio_led mackerel_leds[] = {
520 { 700 {
@@ -690,7 +870,15 @@ static struct resource sdhi0_resources[] = {
690 .flags = IORESOURCE_MEM, 870 .flags = IORESOURCE_MEM,
691 }, 871 },
692 [1] = { 872 [1] = {
693 .start = evt2irq(0x0e00) /* SDHI0 */, 873 .start = evt2irq(0x0e00) /* SDHI0_SDHI0I0 */,
874 .flags = IORESOURCE_IRQ,
875 },
876 [2] = {
877 .start = evt2irq(0x0e20) /* SDHI0_SDHI0I1 */,
878 .flags = IORESOURCE_IRQ,
879 },
880 [3] = {
881 .start = evt2irq(0x0e40) /* SDHI0_SDHI0I2 */,
694 .flags = IORESOURCE_IRQ, 882 .flags = IORESOURCE_IRQ,
695 }, 883 },
696}; 884};
@@ -705,7 +893,7 @@ static struct platform_device sdhi0_device = {
705 }, 893 },
706}; 894};
707 895
708#if !defined(CONFIG_MMC_SH_MMCIF) 896#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
709/* SDHI1 */ 897/* SDHI1 */
710static struct sh_mobile_sdhi_info sdhi1_info = { 898static struct sh_mobile_sdhi_info sdhi1_info = {
711 .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX, 899 .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
@@ -725,7 +913,15 @@ static struct resource sdhi1_resources[] = {
725 .flags = IORESOURCE_MEM, 913 .flags = IORESOURCE_MEM,
726 }, 914 },
727 [1] = { 915 [1] = {
728 .start = evt2irq(0x0e80), 916 .start = evt2irq(0x0e80), /* SDHI1_SDHI1I0 */
917 .flags = IORESOURCE_IRQ,
918 },
919 [2] = {
920 .start = evt2irq(0x0ea0), /* SDHI1_SDHI1I1 */
921 .flags = IORESOURCE_IRQ,
922 },
923 [3] = {
924 .start = evt2irq(0x0ec0), /* SDHI1_SDHI1I2 */
729 .flags = IORESOURCE_IRQ, 925 .flags = IORESOURCE_IRQ,
730 }, 926 },
731}; 927};
@@ -768,7 +964,15 @@ static struct resource sdhi2_resources[] = {
768 .flags = IORESOURCE_MEM, 964 .flags = IORESOURCE_MEM,
769 }, 965 },
770 [1] = { 966 [1] = {
771 .start = evt2irq(0x1200), 967 .start = evt2irq(0x1200), /* SDHI2_SDHI2I0 */
968 .flags = IORESOURCE_IRQ,
969 },
970 [2] = {
971 .start = evt2irq(0x1220), /* SDHI2_SDHI2I1 */
972 .flags = IORESOURCE_IRQ,
973 },
974 [3] = {
975 .start = evt2irq(0x1240), /* SDHI2_SDHI2I2 */
772 .flags = IORESOURCE_IRQ, 976 .flags = IORESOURCE_IRQ,
773 }, 977 },
774}; 978};
@@ -803,6 +1007,15 @@ static struct resource sh_mmcif_resources[] = {
803 }, 1007 },
804}; 1008};
805 1009
1010static struct sh_mmcif_dma sh_mmcif_dma = {
1011 .chan_priv_rx = {
1012 .slave_id = SHDMA_SLAVE_MMCIF_RX,
1013 },
1014 .chan_priv_tx = {
1015 .slave_id = SHDMA_SLAVE_MMCIF_TX,
1016 },
1017};
1018
806static struct sh_mmcif_plat_data sh_mmcif_plat = { 1019static struct sh_mmcif_plat_data sh_mmcif_plat = {
807 .sup_pclk = 0, 1020 .sup_pclk = 0,
808 .ocr = MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34, 1021 .ocr = MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34,
@@ -810,6 +1023,7 @@ static struct sh_mmcif_plat_data sh_mmcif_plat = {
810 MMC_CAP_8_BIT_DATA | 1023 MMC_CAP_8_BIT_DATA |
811 MMC_CAP_NEEDS_POLL, 1024 MMC_CAP_NEEDS_POLL,
812 .get_cd = slot_cn7_get_cd, 1025 .get_cd = slot_cn7_get_cd,
1026 .dma = &sh_mmcif_dma,
813}; 1027};
814 1028
815static struct platform_device sh_mmcif_device = { 1029static struct platform_device sh_mmcif_device = {
@@ -858,37 +1072,23 @@ static struct soc_camera_link camera_link = {
858 .priv = &camera_info, 1072 .priv = &camera_info,
859}; 1073};
860 1074
861static void dummy_release(struct device *dev) 1075static struct platform_device *camera_device;
1076
1077static void mackerel_camera_release(struct device *dev)
862{ 1078{
1079 soc_camera_platform_release(&camera_device);
863} 1080}
864 1081
865static struct platform_device camera_device = {
866 .name = "soc_camera_platform",
867 .dev = {
868 .platform_data = &camera_info,
869 .release = dummy_release,
870 },
871};
872
873static int mackerel_camera_add(struct soc_camera_link *icl, 1082static int mackerel_camera_add(struct soc_camera_link *icl,
874 struct device *dev) 1083 struct device *dev)
875{ 1084{
876 if (icl != &camera_link) 1085 return soc_camera_platform_add(icl, dev, &camera_device, &camera_link,
877 return -ENODEV; 1086 mackerel_camera_release, 0);
878
879 camera_info.dev = dev;
880
881 return platform_device_register(&camera_device);
882} 1087}
883 1088
884static void mackerel_camera_del(struct soc_camera_link *icl) 1089static void mackerel_camera_del(struct soc_camera_link *icl)
885{ 1090{
886 if (icl != &camera_link) 1091 soc_camera_platform_del(icl, camera_device, &camera_link);
887 return;
888
889 platform_device_unregister(&camera_device);
890 memset(&camera_device.dev.kobj, 0,
891 sizeof(camera_device.dev.kobj));
892} 1092}
893 1093
894static struct sh_mobile_ceu_info sh_mobile_ceu_info = { 1094static struct sh_mobile_ceu_info sh_mobile_ceu_info = {
@@ -935,12 +1135,13 @@ static struct platform_device *mackerel_devices[] __initdata = {
935 &smc911x_device, 1135 &smc911x_device,
936 &lcdc_device, 1136 &lcdc_device,
937 &usb1_host_device, 1137 &usb1_host_device,
1138 &usbhs1_device,
938 &leds_device, 1139 &leds_device,
939 &fsi_device, 1140 &fsi_device,
940 &fsi_ak4643_device, 1141 &fsi_ak4643_device,
941 &fsi_hdmi_device, 1142 &fsi_hdmi_device,
942 &sdhi0_device, 1143 &sdhi0_device,
943#if !defined(CONFIG_MMC_SH_MMCIF) 1144#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
944 &sdhi1_device, 1145 &sdhi1_device,
945#endif 1146#endif
946 &sdhi2_device, 1147 &sdhi2_device,
@@ -1030,6 +1231,7 @@ static void __init mackerel_map_io(void)
1030 1231
1031#define GPIO_PORT9CR 0xE6051009 1232#define GPIO_PORT9CR 0xE6051009
1032#define GPIO_PORT10CR 0xE605100A 1233#define GPIO_PORT10CR 0xE605100A
1234#define GPIO_PORT168CR 0xE60520A8
1033#define SRCR4 0xe61580bc 1235#define SRCR4 0xe61580bc
1034#define USCCR1 0xE6058144 1236#define USCCR1 0xE6058144
1035static void __init mackerel_init(void) 1237static void __init mackerel_init(void)
@@ -1088,6 +1290,7 @@ static void __init mackerel_init(void)
1088 gpio_request(GPIO_FN_OVCN_1_114, NULL); 1290 gpio_request(GPIO_FN_OVCN_1_114, NULL);
1089 gpio_request(GPIO_FN_EXTLP_1, NULL); 1291 gpio_request(GPIO_FN_EXTLP_1, NULL);
1090 gpio_request(GPIO_FN_OVCN2_1, NULL); 1292 gpio_request(GPIO_FN_OVCN2_1, NULL);
1293 gpio_pull_down(GPIO_PORT168CR);
1091 1294
1092 /* setup USB phy */ 1295 /* setup USB phy */
1093 __raw_writew(0x8a0a, 0xE6058130); /* USBCR4 */ 1296 __raw_writew(0x8a0a, 0xE6058130); /* USBCR4 */
@@ -1140,7 +1343,7 @@ static void __init mackerel_init(void)
1140 gpio_request(GPIO_FN_SDHID0_1, NULL); 1343 gpio_request(GPIO_FN_SDHID0_1, NULL);
1141 gpio_request(GPIO_FN_SDHID0_0, NULL); 1344 gpio_request(GPIO_FN_SDHID0_0, NULL);
1142 1345
1143#if !defined(CONFIG_MMC_SH_MMCIF) 1346#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
1144 /* enable SDHI1 */ 1347 /* enable SDHI1 */
1145 gpio_request(GPIO_FN_SDHICMD1, NULL); 1348 gpio_request(GPIO_FN_SDHICMD1, NULL);
1146 gpio_request(GPIO_FN_SDHICLK1, NULL); 1349 gpio_request(GPIO_FN_SDHICLK1, NULL);
@@ -1216,6 +1419,7 @@ static void __init mackerel_init(void)
1216 platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices)); 1419 platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices));
1217 1420
1218 hdmi_init_pm_clock(); 1421 hdmi_init_pm_clock();
1422 sh7372_pm_init();
1219} 1423}
1220 1424
1221static void __init mackerel_timer_init(void) 1425static void __init mackerel_timer_init(void)
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index e9731b5a73ed..d17eb66f4ac2 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -44,6 +44,11 @@
44#define DSI1PCKCR 0xe6150098 44#define DSI1PCKCR 0xe6150098
45#define PLLC01CR 0xe6150028 45#define PLLC01CR 0xe6150028
46#define PLLC2CR 0xe615002c 46#define PLLC2CR 0xe615002c
47#define RMSTPCR0 0xe6150110
48#define RMSTPCR1 0xe6150114
49#define RMSTPCR2 0xe6150118
50#define RMSTPCR3 0xe615011c
51#define RMSTPCR4 0xe6150120
47#define SMSTPCR0 0xe6150130 52#define SMSTPCR0 0xe6150130
48#define SMSTPCR1 0xe6150134 53#define SMSTPCR1 0xe6150134
49#define SMSTPCR2 0xe6150138 54#define SMSTPCR2 0xe6150138
@@ -421,9 +426,6 @@ static unsigned long fsidiv_recalc(struct clk *clk)
421 426
422 value = __raw_readl(clk->mapping->base); 427 value = __raw_readl(clk->mapping->base);
423 428
424 if ((value & 0x3) != 0x3)
425 return 0;
426
427 value >>= 16; 429 value >>= 16;
428 if (value < 2) 430 if (value < 2)
429 return 0; 431 return 0;
@@ -504,7 +506,7 @@ static struct clk *late_main_clks[] = {
504enum { MSTP001, 506enum { MSTP001,
505 MSTP131, MSTP130, 507 MSTP131, MSTP130,
506 MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, 508 MSTP129, MSTP128, MSTP127, MSTP126, MSTP125,
507 MSTP118, MSTP117, MSTP116, 509 MSTP118, MSTP117, MSTP116, MSTP113,
508 MSTP106, MSTP101, MSTP100, 510 MSTP106, MSTP101, MSTP100,
509 MSTP223, 511 MSTP223,
510 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, 512 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
@@ -527,6 +529,7 @@ static struct clk mstp_clks[MSTP_NR] = {
527 [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */ 529 [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */
528 [MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */ 530 [MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */
529 [MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */ 531 [MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
532 [MSTP113] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 13, 0), /* MERAM */
530 [MSTP106] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 6, 0), /* JPU */ 533 [MSTP106] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 6, 0), /* JPU */
531 [MSTP101] = MSTP(&div4_clks[DIV4_M1], SMSTPCR1, 1, 0), /* VPU */ 534 [MSTP101] = MSTP(&div4_clks[DIV4_M1], SMSTPCR1, 1, 0), /* VPU */
532 [MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */ 535 [MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
@@ -617,6 +620,7 @@ static struct clk_lookup lookups[] = {
617 CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX0 */ 620 CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX0 */
618 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */ 621 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */
619 CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */ 622 CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */
623 CLKDEV_DEV_ID("sh_mobile_meram.0", &mstp_clks[MSTP113]), /* MERAM */
620 CLKDEV_DEV_ID("uio_pdrv_genirq.5", &mstp_clks[MSTP106]), /* JPU */ 624 CLKDEV_DEV_ID("uio_pdrv_genirq.5", &mstp_clks[MSTP106]), /* JPU */
621 CLKDEV_DEV_ID("uio_pdrv_genirq.0", &mstp_clks[MSTP101]), /* VPU */ 625 CLKDEV_DEV_ID("uio_pdrv_genirq.0", &mstp_clks[MSTP101]), /* VPU */
622 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */ 626 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
@@ -634,6 +638,7 @@ static struct clk_lookup lookups[] = {
634 CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* IIC1 */ 638 CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* IIC1 */
635 CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */ 639 CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */
636 CLKDEV_DEV_ID("r8a66597_udc.0", &mstp_clks[MSTP322]), /* USB0 */ 640 CLKDEV_DEV_ID("r8a66597_udc.0", &mstp_clks[MSTP322]), /* USB0 */
641 CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[MSTP322]), /* USB0 */
637 CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */ 642 CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
638 CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */ 643 CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
639 CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMC */ 644 CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMC */
@@ -644,6 +649,7 @@ static struct clk_lookup lookups[] = {
644 CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* IIC4 */ 649 CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* IIC4 */
645 CLKDEV_DEV_ID("r8a66597_hcd.1", &mstp_clks[MSTP406]), /* USB1 */ 650 CLKDEV_DEV_ID("r8a66597_hcd.1", &mstp_clks[MSTP406]), /* USB1 */
646 CLKDEV_DEV_ID("r8a66597_udc.1", &mstp_clks[MSTP406]), /* USB1 */ 651 CLKDEV_DEV_ID("r8a66597_udc.1", &mstp_clks[MSTP406]), /* USB1 */
652 CLKDEV_DEV_ID("renesas_usbhs.1", &mstp_clks[MSTP406]), /* USB1 */
647 CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */ 653 CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
648 654
649 CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]), 655 CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]),
@@ -655,6 +661,13 @@ void __init sh7372_clock_init(void)
655{ 661{
656 int k, ret = 0; 662 int k, ret = 0;
657 663
664 /* make sure MSTP bits on the RT/SH4AL-DSP side are off */
665 __raw_writel(0xe4ef8087, RMSTPCR0);
666 __raw_writel(0xffffffff, RMSTPCR1);
667 __raw_writel(0x37c7f7ff, RMSTPCR2);
668 __raw_writel(0xffffffff, RMSTPCR3);
669 __raw_writel(0xffe0fffd, RMSTPCR4);
670
658 for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) 671 for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
659 ret = clk_register(main_clks[k]); 672 ret = clk_register(main_clks[k]);
660 673
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index 7e58904c1c8c..bcacb1e8cf85 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -266,7 +266,8 @@ enum { MSTP001,
266 MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100, 266 MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100,
267 MSTP219, 267 MSTP219,
268 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, 268 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
269 MSTP331, MSTP329, MSTP325, MSTP323, MSTP312, 269 MSTP331, MSTP329, MSTP325, MSTP323, MSTP318,
270 MSTP314, MSTP313, MSTP312, MSTP311,
270 MSTP411, MSTP410, MSTP403, 271 MSTP411, MSTP410, MSTP403,
271 MSTP_NR }; 272 MSTP_NR };
272 273
@@ -295,7 +296,11 @@ static struct clk mstp_clks[MSTP_NR] = {
295 [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */ 296 [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
296 [MSTP325] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 25, 0), /* IrDA */ 297 [MSTP325] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 25, 0), /* IrDA */
297 [MSTP323] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */ 298 [MSTP323] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */
299 [MSTP318] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 18, 0), /* SY-DMAC */
300 [MSTP314] = MSTP(&div6_clks[DIV6_SDHI0], SMSTPCR3, 14, 0), /* SDHI0 */
301 [MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */
298 [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */ 302 [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
303 [MSTP311] = MSTP(&div6_clks[DIV6_SDHI2], SMSTPCR3, 11, 0), /* SDHI2 */
299 [MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */ 304 [MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */
300 [MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */ 305 [MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */
301 [MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */ 306 [MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
@@ -313,6 +318,9 @@ static struct clk_lookup lookups[] = {
313 CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]), 318 CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
314 CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]), 319 CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]),
315 CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]), 320 CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]),
321 CLKDEV_CON_ID("sdhi0_clk", &div6_clks[DIV6_SDHI0]),
322 CLKDEV_CON_ID("sdhi1_clk", &div6_clks[DIV6_SDHI1]),
323 CLKDEV_CON_ID("sdhi2_clk", &div6_clks[DIV6_SDHI2]),
316 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]), 324 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]),
317 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]), 325 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]),
318 CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]), 326 CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]),
@@ -341,7 +349,11 @@ static struct clk_lookup lookups[] = {
341 CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */ 349 CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
342 CLKDEV_DEV_ID("sh_irda.0", &mstp_clks[MSTP325]), /* IrDA */ 350 CLKDEV_DEV_ID("sh_irda.0", &mstp_clks[MSTP325]), /* IrDA */
343 CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* I2C1 */ 351 CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* I2C1 */
352 CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP318]), /* SY-DMAC */
353 CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
354 CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
344 CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */ 355 CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
356 CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP311]), /* SDHI2 */
345 CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */ 357 CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */
346 CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */ 358 CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */
347 CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */ 359 CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
@@ -351,6 +363,11 @@ void __init sh73a0_clock_init(void)
351{ 363{
352 int k, ret = 0; 364 int k, ret = 0;
353 365
366 /* Set SDHI clocks to a known state */
367 __raw_writel(0x108, SD0CKCR);
368 __raw_writel(0x108, SD1CKCR);
369 __raw_writel(0x108, SD2CKCR);
370
354 /* detect main clock parent */ 371 /* detect main clock parent */
355 switch ((__raw_readl(CKSCR) >> 24) & 0x03) { 372 switch ((__raw_readl(CKSCR) >> 24) & 0x03) {
356 case 0: 373 case 0:
diff --git a/arch/arm/mach-shmobile/cpuidle.c b/arch/arm/mach-shmobile/cpuidle.c
new file mode 100644
index 000000000000..2e44f11f592e
--- /dev/null
+++ b/arch/arm/mach-shmobile/cpuidle.c
@@ -0,0 +1,92 @@
1/*
2 * CPUIdle support code for SH-Mobile ARM
3 *
4 * Copyright (C) 2011 Magnus Damm
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/pm.h>
12#include <linux/cpuidle.h>
13#include <linux/suspend.h>
14#include <linux/module.h>
15#include <linux/err.h>
16#include <asm/system.h>
17#include <asm/io.h>
18
19static void shmobile_enter_wfi(void)
20{
21 cpu_do_idle();
22}
23
24void (*shmobile_cpuidle_modes[CPUIDLE_STATE_MAX])(void) = {
25 shmobile_enter_wfi, /* regular sleep mode */
26};
27
28static int shmobile_cpuidle_enter(struct cpuidle_device *dev,
29 struct cpuidle_state *state)
30{
31 ktime_t before, after;
32 int requested_state = state - &dev->states[0];
33
34 dev->last_state = &dev->states[requested_state];
35 before = ktime_get();
36
37 local_irq_disable();
38 local_fiq_disable();
39
40 shmobile_cpuidle_modes[requested_state]();
41
42 local_irq_enable();
43 local_fiq_enable();
44
45 after = ktime_get();
46 return ktime_to_ns(ktime_sub(after, before)) >> 10;
47}
48
49static struct cpuidle_device shmobile_cpuidle_dev;
50static struct cpuidle_driver shmobile_cpuidle_driver = {
51 .name = "shmobile_cpuidle",
52 .owner = THIS_MODULE,
53};
54
55void (*shmobile_cpuidle_setup)(struct cpuidle_device *dev);
56
57static int shmobile_cpuidle_init(void)
58{
59 struct cpuidle_device *dev = &shmobile_cpuidle_dev;
60 struct cpuidle_state *state;
61 int i;
62
63 cpuidle_register_driver(&shmobile_cpuidle_driver);
64
65 for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
66 dev->states[i].name[0] = '\0';
67 dev->states[i].desc[0] = '\0';
68 dev->states[i].enter = shmobile_cpuidle_enter;
69 }
70
71 i = CPUIDLE_DRIVER_STATE_START;
72
73 state = &dev->states[i++];
74 snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
75 strncpy(state->desc, "WFI", CPUIDLE_DESC_LEN);
76 state->exit_latency = 1;
77 state->target_residency = 1 * 2;
78 state->power_usage = 3;
79 state->flags = 0;
80 state->flags |= CPUIDLE_FLAG_TIME_VALID;
81
82 dev->safe_state = state;
83 dev->state_count = i;
84
85 if (shmobile_cpuidle_setup)
86 shmobile_cpuidle_setup(dev);
87
88 cpuidle_register_device(dev);
89
90 return 0;
91}
92late_initcall(shmobile_cpuidle_init);
diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
index d4cec6b4c7d9..26079d933d91 100644
--- a/arch/arm/mach-shmobile/headsmp.S
+++ b/arch/arm/mach-shmobile/headsmp.S
@@ -24,4 +24,4 @@
24 .align 12 24 .align 12
25ENTRY(shmobile_secondary_vector) 25ENTRY(shmobile_secondary_vector)
26 ldr pc, 1f 26 ldr pc, 1f
271: .long secondary_startup - PAGE_OFFSET + PHYS_OFFSET 271: .long secondary_startup - PAGE_OFFSET + PLAT_PHYS_OFFSET
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index 013ac0ee8256..06aecb31d9c7 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -8,6 +8,10 @@ struct clk;
8extern int clk_init(void); 8extern int clk_init(void);
9extern void shmobile_handle_irq_intc(struct pt_regs *); 9extern void shmobile_handle_irq_intc(struct pt_regs *);
10extern void shmobile_handle_irq_gic(struct pt_regs *); 10extern void shmobile_handle_irq_gic(struct pt_regs *);
11extern struct platform_suspend_ops shmobile_suspend_ops;
12struct cpuidle_device;
13extern void (*shmobile_cpuidle_modes[])(void);
14extern void (*shmobile_cpuidle_setup)(struct cpuidle_device *dev);
11 15
12extern void sh7367_init_irq(void); 16extern void sh7367_init_irq(void);
13extern void sh7367_add_early_devices(void); 17extern void sh7367_add_early_devices(void);
@@ -30,6 +34,9 @@ extern void sh7372_add_early_devices(void);
30extern void sh7372_add_standard_devices(void); 34extern void sh7372_add_standard_devices(void);
31extern void sh7372_clock_init(void); 35extern void sh7372_clock_init(void);
32extern void sh7372_pinmux_init(void); 36extern void sh7372_pinmux_init(void);
37extern void sh7372_pm_init(void);
38extern void sh7372_cpu_suspend(void);
39extern void sh7372_cpu_resume(void);
33extern struct clk sh7372_extal1_clk; 40extern struct clk sh7372_extal1_clk;
34extern struct clk sh7372_extal2_clk; 41extern struct clk sh7372_extal2_clk;
35 42
diff --git a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
index 3029aba38688..9f134dfeffdc 100644
--- a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
+++ b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
@@ -87,8 +87,7 @@ WAIT 1, 0xFE40009C
87ED 0xFE400354, 0x01AD8002 87ED 0xFE400354, 0x01AD8002
88 88
89LIST "SCIF0 - Serial port for earlyprintk" 89LIST "SCIF0 - Serial port for earlyprintk"
90EB 0xE6053098, 0x11
91EB 0xE6053098, 0xe1 90EB 0xE6053098, 0xe1
92EW 0xE6C40000, 0x0000 91EW 0xE6C40000, 0x0000
93EB 0xE6C40004, 0x19 92EB 0xE6C40004, 0x19
94EW 0xE6C40008, 0x3000 93EW 0xE6C40008, 0x0030
diff --git a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
index 3029aba38688..9f134dfeffdc 100644
--- a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
+++ b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
@@ -87,8 +87,7 @@ WAIT 1, 0xFE40009C
87ED 0xFE400354, 0x01AD8002 87ED 0xFE400354, 0x01AD8002
88 88
89LIST "SCIF0 - Serial port for earlyprintk" 89LIST "SCIF0 - Serial port for earlyprintk"
90EB 0xE6053098, 0x11
91EB 0xE6053098, 0xe1 90EB 0xE6053098, 0xe1
92EW 0xE6C40000, 0x0000 91EW 0xE6C40000, 0x0000
93EB 0xE6C40004, 0x19 92EB 0xE6C40004, 0x19
94EW 0xE6C40008, 0x3000 93EW 0xE6C40008, 0x0030
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index 5736efcca60c..df20d7670172 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -435,6 +435,7 @@ enum {
435 435
436/* DMA slave IDs */ 436/* DMA slave IDs */
437enum { 437enum {
438 SHDMA_SLAVE_INVALID,
438 SHDMA_SLAVE_SCIF0_TX, 439 SHDMA_SLAVE_SCIF0_TX,
439 SHDMA_SLAVE_SCIF0_RX, 440 SHDMA_SLAVE_SCIF0_RX,
440 SHDMA_SLAVE_SCIF1_TX, 441 SHDMA_SLAVE_SCIF1_TX,
diff --git a/arch/arm/mach-shmobile/include/mach/sh73a0.h b/arch/arm/mach-shmobile/include/mach/sh73a0.h
index ceb2cdc92bf9..216c3d695ef1 100644
--- a/arch/arm/mach-shmobile/include/mach/sh73a0.h
+++ b/arch/arm/mach-shmobile/include/mach/sh73a0.h
@@ -463,5 +463,35 @@ enum {
463 GPIO_FN_FSIAIBT_PU, 463 GPIO_FN_FSIAIBT_PU,
464 GPIO_FN_FSIAISLD_PU, 464 GPIO_FN_FSIAISLD_PU,
465}; 465};
466/* DMA slave IDs */
467enum {
468 SHDMA_SLAVE_INVALID,
469 SHDMA_SLAVE_SCIF0_TX,
470 SHDMA_SLAVE_SCIF0_RX,
471 SHDMA_SLAVE_SCIF1_TX,
472 SHDMA_SLAVE_SCIF1_RX,
473 SHDMA_SLAVE_SCIF2_TX,
474 SHDMA_SLAVE_SCIF2_RX,
475 SHDMA_SLAVE_SCIF3_TX,
476 SHDMA_SLAVE_SCIF3_RX,
477 SHDMA_SLAVE_SCIF4_TX,
478 SHDMA_SLAVE_SCIF4_RX,
479 SHDMA_SLAVE_SCIF5_TX,
480 SHDMA_SLAVE_SCIF5_RX,
481 SHDMA_SLAVE_SCIF6_TX,
482 SHDMA_SLAVE_SCIF6_RX,
483 SHDMA_SLAVE_SCIF7_TX,
484 SHDMA_SLAVE_SCIF7_RX,
485 SHDMA_SLAVE_SCIF8_TX,
486 SHDMA_SLAVE_SCIF8_RX,
487 SHDMA_SLAVE_SDHI0_TX,
488 SHDMA_SLAVE_SDHI0_RX,
489 SHDMA_SLAVE_SDHI1_TX,
490 SHDMA_SLAVE_SDHI1_RX,
491 SHDMA_SLAVE_SDHI2_TX,
492 SHDMA_SLAVE_SDHI2_RX,
493 SHDMA_SLAVE_MMCIF_TX,
494 SHDMA_SLAVE_MMCIF_RX,
495};
466 496
467#endif /* __ASM_SH73A0_H__ */ 497#endif /* __ASM_SH73A0_H__ */
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index 7a4960f9c1e3..3b28743c77eb 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -27,8 +27,6 @@
27 27
28enum { 28enum {
29 UNUSED_INTCA = 0, 29 UNUSED_INTCA = 0,
30 ENABLED,
31 DISABLED,
32 30
33 /* interrupt sources INTCA */ 31 /* interrupt sources INTCA */
34 IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A, 32 IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A,
@@ -49,14 +47,14 @@ enum {
49 MSIOF2, MSIOF1, 47 MSIOF2, MSIOF1,
50 SCIFA4, SCIFA5, SCIFB, 48 SCIFA4, SCIFA5, SCIFB,
51 FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, 49 FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
52 SDHI0, 50 SDHI0_SDHI0I0, SDHI0_SDHI0I1, SDHI0_SDHI0I2, SDHI0_SDHI0I3,
53 SDHI1, 51 SDHI1_SDHI1I0, SDHI1_SDHI1I1, SDHI1_SDHI1I2,
54 IRREM, 52 IRREM,
55 IRDA, 53 IRDA,
56 TPU0, 54 TPU0,
57 TTI20, 55 TTI20,
58 DDM, 56 DDM,
59 SDHI2, 57 SDHI2_SDHI2I0, SDHI2_SDHI2I1, SDHI2_SDHI2I2, SDHI2_SDHI2I3,
60 RWDT0, 58 RWDT0,
61 DMAC1_1_DEI0, DMAC1_1_DEI1, DMAC1_1_DEI2, DMAC1_1_DEI3, 59 DMAC1_1_DEI0, DMAC1_1_DEI1, DMAC1_1_DEI2, DMAC1_1_DEI3,
62 DMAC1_2_DEI4, DMAC1_2_DEI5, DMAC1_2_DADERR, 60 DMAC1_2_DEI4, DMAC1_2_DEI5, DMAC1_2_DADERR,
@@ -84,7 +82,7 @@ enum {
84 82
85 /* interrupt groups INTCA */ 83 /* interrupt groups INTCA */
86 DMAC1_1, DMAC1_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2, SHWYSTAT, 84 DMAC1_1, DMAC1_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2, SHWYSTAT,
87 AP_ARM1, AP_ARM2, SPU2, FLCTL, IIC1 85 AP_ARM1, AP_ARM2, SPU2, FLCTL, IIC1, SDHI0, SDHI1, SDHI2
88}; 86};
89 87
90static struct intc_vect intca_vectors[] __initdata = { 88static struct intc_vect intca_vectors[] __initdata = {
@@ -125,17 +123,17 @@ static struct intc_vect intca_vectors[] __initdata = {
125 INTC_VECT(SCIFB, 0x0d60), 123 INTC_VECT(SCIFB, 0x0d60),
126 INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0), 124 INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0),
127 INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0), 125 INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0),
128 INTC_VECT(SDHI0, 0x0e00), INTC_VECT(SDHI0, 0x0e20), 126 INTC_VECT(SDHI0_SDHI0I0, 0x0e00), INTC_VECT(SDHI0_SDHI0I1, 0x0e20),
129 INTC_VECT(SDHI0, 0x0e40), INTC_VECT(SDHI0, 0x0e60), 127 INTC_VECT(SDHI0_SDHI0I2, 0x0e40), INTC_VECT(SDHI0_SDHI0I3, 0x0e60),
130 INTC_VECT(SDHI1, 0x0e80), INTC_VECT(SDHI1, 0x0ea0), 128 INTC_VECT(SDHI1_SDHI1I0, 0x0e80), INTC_VECT(SDHI1_SDHI1I1, 0x0ea0),
131 INTC_VECT(SDHI1, 0x0ec0), 129 INTC_VECT(SDHI1_SDHI1I2, 0x0ec0),
132 INTC_VECT(IRREM, 0x0f60), 130 INTC_VECT(IRREM, 0x0f60),
133 INTC_VECT(IRDA, 0x0480), 131 INTC_VECT(IRDA, 0x0480),
134 INTC_VECT(TPU0, 0x04a0), 132 INTC_VECT(TPU0, 0x04a0),
135 INTC_VECT(TTI20, 0x1100), 133 INTC_VECT(TTI20, 0x1100),
136 INTC_VECT(DDM, 0x1140), 134 INTC_VECT(DDM, 0x1140),
137 INTC_VECT(SDHI2, 0x1200), INTC_VECT(SDHI2, 0x1220), 135 INTC_VECT(SDHI2_SDHI2I0, 0x1200), INTC_VECT(SDHI2_SDHI2I1, 0x1220),
138 INTC_VECT(SDHI2, 0x1240), INTC_VECT(SDHI2, 0x1260), 136 INTC_VECT(SDHI2_SDHI2I2, 0x1240), INTC_VECT(SDHI2_SDHI2I3, 0x1260),
139 INTC_VECT(RWDT0, 0x1280), 137 INTC_VECT(RWDT0, 0x1280),
140 INTC_VECT(DMAC1_1_DEI0, 0x2000), INTC_VECT(DMAC1_1_DEI1, 0x2020), 138 INTC_VECT(DMAC1_1_DEI0, 0x2000), INTC_VECT(DMAC1_1_DEI1, 0x2020),
141 INTC_VECT(DMAC1_1_DEI2, 0x2040), INTC_VECT(DMAC1_1_DEI3, 0x2060), 139 INTC_VECT(DMAC1_1_DEI2, 0x2040), INTC_VECT(DMAC1_1_DEI3, 0x2060),
@@ -195,6 +193,12 @@ static struct intc_group intca_groups[] __initdata = {
195 INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI, 193 INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI,
196 FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), 194 FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
197 INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1), 195 INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1),
196 INTC_GROUP(SDHI0, SDHI0_SDHI0I0, SDHI0_SDHI0I1,
197 SDHI0_SDHI0I2, SDHI0_SDHI0I3),
198 INTC_GROUP(SDHI1, SDHI1_SDHI1I0, SDHI1_SDHI1I1,
199 SDHI1_SDHI1I2),
200 INTC_GROUP(SDHI2, SDHI2_SDHI2I0, SDHI2_SDHI2I1,
201 SDHI2_SDHI2I2, SDHI2_SDHI2I3),
198 INTC_GROUP(SHWYSTAT, SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM), 202 INTC_GROUP(SHWYSTAT, SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM),
199}; 203};
200 204
@@ -230,10 +234,10 @@ static struct intc_mask_reg intca_mask_registers[] __initdata = {
230 { SCIFB, SCIFA5, SCIFA4, MSIOF1, 234 { SCIFB, SCIFA5, SCIFA4, MSIOF1,
231 0, 0, MSIOF2, 0 } }, 235 0, 0, MSIOF2, 0 } },
232 { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */ 236 { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */
233 { DISABLED, ENABLED, ENABLED, ENABLED, 237 { SDHI0_SDHI0I3, SDHI0_SDHI0I2, SDHI0_SDHI0I1, SDHI0_SDHI0I0,
234 FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, 238 FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
235 { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */ 239 { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */
236 { 0, ENABLED, ENABLED, ENABLED, 240 { 0, SDHI1_SDHI1I2, SDHI1_SDHI1I1, SDHI1_SDHI1I0,
237 TTI20, USBHSDMAC0_USHDMI, 0, 0 } }, 241 TTI20, USBHSDMAC0_USHDMI, 0, 0 } },
238 { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */ 242 { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */
239 { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10, 243 { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10,
@@ -248,7 +252,7 @@ static struct intc_mask_reg intca_mask_registers[] __initdata = {
248 { 0, 0, TPU0, 0, 252 { 0, 0, TPU0, 0,
249 0, 0, 0, 0 } }, 253 0, 0, 0, 0 } },
250 { 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */ 254 { 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */
251 { DISABLED, DISABLED, ENABLED, ENABLED, 255 { SDHI2_SDHI2I3, SDHI2_SDHI2I2, SDHI2_SDHI2I1, SDHI2_SDHI2I0,
252 0, CMT3, 0, RWDT0 } }, 256 0, CMT3, 0, RWDT0 } },
253 { 0xe6950080, 0xe69500c0, 8, /* IMR0A3 / IMCR0A3 */ 257 { 0xe6950080, 0xe69500c0, 8, /* IMR0A3 / IMCR0A3 */
254 { SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM, 0, 258 { SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM, 0,
@@ -354,14 +358,10 @@ static struct intc_mask_reg intca_ack_registers[] __initdata = {
354 { IRQ24A, IRQ25A, IRQ26A, IRQ27A, IRQ28A, IRQ29A, IRQ30A, IRQ31A } }, 358 { IRQ24A, IRQ25A, IRQ26A, IRQ27A, IRQ28A, IRQ29A, IRQ30A, IRQ31A } },
355}; 359};
356 360
357static struct intc_desc intca_desc __initdata = { 361static DECLARE_INTC_DESC_ACK(intca_desc, "sh7372-intca",
358 .name = "sh7372-intca", 362 intca_vectors, intca_groups,
359 .force_enable = ENABLED, 363 intca_mask_registers, intca_prio_registers,
360 .force_disable = DISABLED, 364 intca_sense_registers, intca_ack_registers);
361 .hw = INTC_HW_DESC(intca_vectors, intca_groups,
362 intca_mask_registers, intca_prio_registers,
363 intca_sense_registers, intca_ack_registers),
364};
365 365
366enum { 366enum {
367 UNUSED_INTCS = 0, 367 UNUSED_INTCS = 0,
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
new file mode 100644
index 000000000000..8e4aadf14c9f
--- /dev/null
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -0,0 +1,108 @@
1/*
2 * sh7372 Power management support
3 *
4 * Copyright (C) 2011 Magnus Damm
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/pm.h>
12#include <linux/suspend.h>
13#include <linux/cpuidle.h>
14#include <linux/module.h>
15#include <linux/list.h>
16#include <linux/err.h>
17#include <linux/slab.h>
18#include <asm/system.h>
19#include <asm/io.h>
20#include <asm/tlbflush.h>
21#include <mach/common.h>
22
23#define SMFRAM 0xe6a70000
24#define SYSTBCR 0xe6150024
25#define SBAR 0xe6180020
26#define APARMBAREA 0xe6f10020
27
28static void sh7372_enter_core_standby(void)
29{
30 void __iomem *smfram = (void __iomem *)SMFRAM;
31
32 __raw_writel(0, APARMBAREA); /* translate 4k */
33 __raw_writel(__pa(sh7372_cpu_resume), SBAR); /* set reset vector */
34 __raw_writel(0x10, SYSTBCR); /* enable core standby */
35
36 __raw_writel(0, smfram + 0x3c); /* clear page table address */
37
38 sh7372_cpu_suspend();
39 cpu_init();
40
41 /* if page table address is non-NULL then we have been powered down */
42 if (__raw_readl(smfram + 0x3c)) {
43 __raw_writel(__raw_readl(smfram + 0x40),
44 __va(__raw_readl(smfram + 0x3c)));
45
46 flush_tlb_all();
47 set_cr(__raw_readl(smfram + 0x38));
48 }
49
50 __raw_writel(0, SYSTBCR); /* disable core standby */
51 __raw_writel(0, SBAR); /* disable reset vector translation */
52}
53
54#ifdef CONFIG_CPU_IDLE
55static void sh7372_cpuidle_setup(struct cpuidle_device *dev)
56{
57 struct cpuidle_state *state;
58 int i = dev->state_count;
59
60 state = &dev->states[i];
61 snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
62 strncpy(state->desc, "Core Standby Mode", CPUIDLE_DESC_LEN);
63 state->exit_latency = 10;
64 state->target_residency = 20 + 10;
65 state->power_usage = 1; /* perhaps not */
66 state->flags = 0;
67 state->flags |= CPUIDLE_FLAG_TIME_VALID;
68 shmobile_cpuidle_modes[i] = sh7372_enter_core_standby;
69
70 dev->state_count = i + 1;
71}
72
73static void sh7372_cpuidle_init(void)
74{
75 shmobile_cpuidle_setup = sh7372_cpuidle_setup;
76}
77#else
78static void sh7372_cpuidle_init(void) {}
79#endif
80
81#ifdef CONFIG_SUSPEND
82static int sh7372_enter_suspend(suspend_state_t suspend_state)
83{
84 sh7372_enter_core_standby();
85 return 0;
86}
87
88static void sh7372_suspend_init(void)
89{
90 shmobile_suspend_ops.enter = sh7372_enter_suspend;
91}
92#else
93static void sh7372_suspend_init(void) {}
94#endif
95
96#define DBGREG1 0xe6100020
97#define DBGREG9 0xe6100040
98
99void __init sh7372_pm_init(void)
100{
101 /* enable DBG hardware block to kick SYSC */
102 __raw_writel(0x0000a500, DBGREG9);
103 __raw_writel(0x0000a501, DBGREG9);
104 __raw_writel(0x00000000, DBGREG1);
105
106 sh7372_suspend_init();
107 sh7372_cpuidle_init();
108}
diff --git a/arch/arm/mach-shmobile/setup-sh7367.c b/arch/arm/mach-shmobile/setup-sh7367.c
index ce28141662da..2c10190dbb55 100644
--- a/arch/arm/mach-shmobile/setup-sh7367.c
+++ b/arch/arm/mach-shmobile/setup-sh7367.c
@@ -22,6 +22,7 @@
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/irq.h> 23#include <linux/irq.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/uio_driver.h>
25#include <linux/delay.h> 26#include <linux/delay.h>
26#include <linux/input.h> 27#include <linux/input.h>
27#include <linux/io.h> 28#include <linux/io.h>
@@ -195,6 +196,214 @@ static struct platform_device cmt10_device = {
195 .num_resources = ARRAY_SIZE(cmt10_resources), 196 .num_resources = ARRAY_SIZE(cmt10_resources),
196}; 197};
197 198
199/* VPU */
200static struct uio_info vpu_platform_data = {
201 .name = "VPU5",
202 .version = "0",
203 .irq = intcs_evt2irq(0x980),
204};
205
206static struct resource vpu_resources[] = {
207 [0] = {
208 .name = "VPU",
209 .start = 0xfe900000,
210 .end = 0xfe902807,
211 .flags = IORESOURCE_MEM,
212 },
213};
214
215static struct platform_device vpu_device = {
216 .name = "uio_pdrv_genirq",
217 .id = 0,
218 .dev = {
219 .platform_data = &vpu_platform_data,
220 },
221 .resource = vpu_resources,
222 .num_resources = ARRAY_SIZE(vpu_resources),
223};
224
225/* VEU0 */
226static struct uio_info veu0_platform_data = {
227 .name = "VEU0",
228 .version = "0",
229 .irq = intcs_evt2irq(0x700),
230};
231
232static struct resource veu0_resources[] = {
233 [0] = {
234 .name = "VEU0",
235 .start = 0xfe920000,
236 .end = 0xfe9200b7,
237 .flags = IORESOURCE_MEM,
238 },
239};
240
241static struct platform_device veu0_device = {
242 .name = "uio_pdrv_genirq",
243 .id = 1,
244 .dev = {
245 .platform_data = &veu0_platform_data,
246 },
247 .resource = veu0_resources,
248 .num_resources = ARRAY_SIZE(veu0_resources),
249};
250
251/* VEU1 */
252static struct uio_info veu1_platform_data = {
253 .name = "VEU1",
254 .version = "0",
255 .irq = intcs_evt2irq(0x720),
256};
257
258static struct resource veu1_resources[] = {
259 [0] = {
260 .name = "VEU1",
261 .start = 0xfe924000,
262 .end = 0xfe9240b7,
263 .flags = IORESOURCE_MEM,
264 },
265};
266
267static struct platform_device veu1_device = {
268 .name = "uio_pdrv_genirq",
269 .id = 2,
270 .dev = {
271 .platform_data = &veu1_platform_data,
272 },
273 .resource = veu1_resources,
274 .num_resources = ARRAY_SIZE(veu1_resources),
275};
276
277/* VEU2 */
278static struct uio_info veu2_platform_data = {
279 .name = "VEU2",
280 .version = "0",
281 .irq = intcs_evt2irq(0x740),
282};
283
284static struct resource veu2_resources[] = {
285 [0] = {
286 .name = "VEU2",
287 .start = 0xfe928000,
288 .end = 0xfe9280b7,
289 .flags = IORESOURCE_MEM,
290 },
291};
292
293static struct platform_device veu2_device = {
294 .name = "uio_pdrv_genirq",
295 .id = 3,
296 .dev = {
297 .platform_data = &veu2_platform_data,
298 },
299 .resource = veu2_resources,
300 .num_resources = ARRAY_SIZE(veu2_resources),
301};
302
303/* VEU3 */
304static struct uio_info veu3_platform_data = {
305 .name = "VEU3",
306 .version = "0",
307 .irq = intcs_evt2irq(0x760),
308};
309
310static struct resource veu3_resources[] = {
311 [0] = {
312 .name = "VEU3",
313 .start = 0xfe92c000,
314 .end = 0xfe92c0b7,
315 .flags = IORESOURCE_MEM,
316 },
317};
318
319static struct platform_device veu3_device = {
320 .name = "uio_pdrv_genirq",
321 .id = 4,
322 .dev = {
323 .platform_data = &veu3_platform_data,
324 },
325 .resource = veu3_resources,
326 .num_resources = ARRAY_SIZE(veu3_resources),
327};
328
329/* VEU2H */
330static struct uio_info veu2h_platform_data = {
331 .name = "VEU2H",
332 .version = "0",
333 .irq = intcs_evt2irq(0x520),
334};
335
336static struct resource veu2h_resources[] = {
337 [0] = {
338 .name = "VEU2H",
339 .start = 0xfe93c000,
340 .end = 0xfe93c27b,
341 .flags = IORESOURCE_MEM,
342 },
343};
344
345static struct platform_device veu2h_device = {
346 .name = "uio_pdrv_genirq",
347 .id = 5,
348 .dev = {
349 .platform_data = &veu2h_platform_data,
350 },
351 .resource = veu2h_resources,
352 .num_resources = ARRAY_SIZE(veu2h_resources),
353};
354
355/* JPU */
356static struct uio_info jpu_platform_data = {
357 .name = "JPU",
358 .version = "0",
359 .irq = intcs_evt2irq(0x560),
360};
361
362static struct resource jpu_resources[] = {
363 [0] = {
364 .name = "JPU",
365 .start = 0xfe980000,
366 .end = 0xfe9902d3,
367 .flags = IORESOURCE_MEM,
368 },
369};
370
371static struct platform_device jpu_device = {
372 .name = "uio_pdrv_genirq",
373 .id = 6,
374 .dev = {
375 .platform_data = &jpu_platform_data,
376 },
377 .resource = jpu_resources,
378 .num_resources = ARRAY_SIZE(jpu_resources),
379};
380
381/* SPU1 */
382static struct uio_info spu1_platform_data = {
383 .name = "SPU1",
384 .version = "0",
385 .irq = evt2irq(0xfc0),
386};
387
388static struct resource spu1_resources[] = {
389 [0] = {
390 .name = "SPU1",
391 .start = 0xfe300000,
392 .end = 0xfe3fffff,
393 .flags = IORESOURCE_MEM,
394 },
395};
396
397static struct platform_device spu1_device = {
398 .name = "uio_pdrv_genirq",
399 .id = 7,
400 .dev = {
401 .platform_data = &spu1_platform_data,
402 },
403 .resource = spu1_resources,
404 .num_resources = ARRAY_SIZE(spu1_resources),
405};
406
198static struct platform_device *sh7367_early_devices[] __initdata = { 407static struct platform_device *sh7367_early_devices[] __initdata = {
199 &scif0_device, 408 &scif0_device,
200 &scif1_device, 409 &scif1_device,
@@ -206,10 +415,24 @@ static struct platform_device *sh7367_early_devices[] __initdata = {
206 &cmt10_device, 415 &cmt10_device,
207}; 416};
208 417
418static struct platform_device *sh7367_devices[] __initdata = {
419 &vpu_device,
420 &veu0_device,
421 &veu1_device,
422 &veu2_device,
423 &veu3_device,
424 &veu2h_device,
425 &jpu_device,
426 &spu1_device,
427};
428
209void __init sh7367_add_standard_devices(void) 429void __init sh7367_add_standard_devices(void)
210{ 430{
211 platform_add_devices(sh7367_early_devices, 431 platform_add_devices(sh7367_early_devices,
212 ARRAY_SIZE(sh7367_early_devices)); 432 ARRAY_SIZE(sh7367_early_devices));
433
434 platform_add_devices(sh7367_devices,
435 ARRAY_SIZE(sh7367_devices));
213} 436}
214 437
215#define SYMSTPCR2 0xe6158048 438#define SYMSTPCR2 0xe6158048
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index ff0494f3d00c..cd807eea69e2 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -22,6 +22,7 @@
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/irq.h> 23#include <linux/irq.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/uio_driver.h>
25#include <linux/delay.h> 26#include <linux/delay.h>
26#include <linux/input.h> 27#include <linux/input.h>
27#include <linux/io.h> 28#include <linux/io.h>
@@ -601,6 +602,214 @@ static struct platform_device dma2_device = {
601 }, 602 },
602}; 603};
603 604
605/* VPU */
606static struct uio_info vpu_platform_data = {
607 .name = "VPU5HG",
608 .version = "0",
609 .irq = intcs_evt2irq(0x980),
610};
611
612static struct resource vpu_resources[] = {
613 [0] = {
614 .name = "VPU",
615 .start = 0xfe900000,
616 .end = 0xfe900157,
617 .flags = IORESOURCE_MEM,
618 },
619};
620
621static struct platform_device vpu_device = {
622 .name = "uio_pdrv_genirq",
623 .id = 0,
624 .dev = {
625 .platform_data = &vpu_platform_data,
626 },
627 .resource = vpu_resources,
628 .num_resources = ARRAY_SIZE(vpu_resources),
629};
630
631/* VEU0 */
632static struct uio_info veu0_platform_data = {
633 .name = "VEU0",
634 .version = "0",
635 .irq = intcs_evt2irq(0x700),
636};
637
638static struct resource veu0_resources[] = {
639 [0] = {
640 .name = "VEU0",
641 .start = 0xfe920000,
642 .end = 0xfe9200cb,
643 .flags = IORESOURCE_MEM,
644 },
645};
646
647static struct platform_device veu0_device = {
648 .name = "uio_pdrv_genirq",
649 .id = 1,
650 .dev = {
651 .platform_data = &veu0_platform_data,
652 },
653 .resource = veu0_resources,
654 .num_resources = ARRAY_SIZE(veu0_resources),
655};
656
657/* VEU1 */
658static struct uio_info veu1_platform_data = {
659 .name = "VEU1",
660 .version = "0",
661 .irq = intcs_evt2irq(0x720),
662};
663
664static struct resource veu1_resources[] = {
665 [0] = {
666 .name = "VEU1",
667 .start = 0xfe924000,
668 .end = 0xfe9240cb,
669 .flags = IORESOURCE_MEM,
670 },
671};
672
673static struct platform_device veu1_device = {
674 .name = "uio_pdrv_genirq",
675 .id = 2,
676 .dev = {
677 .platform_data = &veu1_platform_data,
678 },
679 .resource = veu1_resources,
680 .num_resources = ARRAY_SIZE(veu1_resources),
681};
682
683/* VEU2 */
684static struct uio_info veu2_platform_data = {
685 .name = "VEU2",
686 .version = "0",
687 .irq = intcs_evt2irq(0x740),
688};
689
690static struct resource veu2_resources[] = {
691 [0] = {
692 .name = "VEU2",
693 .start = 0xfe928000,
694 .end = 0xfe928307,
695 .flags = IORESOURCE_MEM,
696 },
697};
698
699static struct platform_device veu2_device = {
700 .name = "uio_pdrv_genirq",
701 .id = 3,
702 .dev = {
703 .platform_data = &veu2_platform_data,
704 },
705 .resource = veu2_resources,
706 .num_resources = ARRAY_SIZE(veu2_resources),
707};
708
709/* VEU3 */
710static struct uio_info veu3_platform_data = {
711 .name = "VEU3",
712 .version = "0",
713 .irq = intcs_evt2irq(0x760),
714};
715
716static struct resource veu3_resources[] = {
717 [0] = {
718 .name = "VEU3",
719 .start = 0xfe92c000,
720 .end = 0xfe92c307,
721 .flags = IORESOURCE_MEM,
722 },
723};
724
725static struct platform_device veu3_device = {
726 .name = "uio_pdrv_genirq",
727 .id = 4,
728 .dev = {
729 .platform_data = &veu3_platform_data,
730 },
731 .resource = veu3_resources,
732 .num_resources = ARRAY_SIZE(veu3_resources),
733};
734
735/* JPU */
736static struct uio_info jpu_platform_data = {
737 .name = "JPU",
738 .version = "0",
739 .irq = intcs_evt2irq(0x560),
740};
741
742static struct resource jpu_resources[] = {
743 [0] = {
744 .name = "JPU",
745 .start = 0xfe980000,
746 .end = 0xfe9902d3,
747 .flags = IORESOURCE_MEM,
748 },
749};
750
751static struct platform_device jpu_device = {
752 .name = "uio_pdrv_genirq",
753 .id = 5,
754 .dev = {
755 .platform_data = &jpu_platform_data,
756 },
757 .resource = jpu_resources,
758 .num_resources = ARRAY_SIZE(jpu_resources),
759};
760
761/* SPU2DSP0 */
762static struct uio_info spu0_platform_data = {
763 .name = "SPU2DSP0",
764 .version = "0",
765 .irq = evt2irq(0x1800),
766};
767
768static struct resource spu0_resources[] = {
769 [0] = {
770 .name = "SPU2DSP0",
771 .start = 0xfe200000,
772 .end = 0xfe2fffff,
773 .flags = IORESOURCE_MEM,
774 },
775};
776
777static struct platform_device spu0_device = {
778 .name = "uio_pdrv_genirq",
779 .id = 6,
780 .dev = {
781 .platform_data = &spu0_platform_data,
782 },
783 .resource = spu0_resources,
784 .num_resources = ARRAY_SIZE(spu0_resources),
785};
786
787/* SPU2DSP1 */
788static struct uio_info spu1_platform_data = {
789 .name = "SPU2DSP1",
790 .version = "0",
791 .irq = evt2irq(0x1820),
792};
793
794static struct resource spu1_resources[] = {
795 [0] = {
796 .name = "SPU2DSP1",
797 .start = 0xfe300000,
798 .end = 0xfe3fffff,
799 .flags = IORESOURCE_MEM,
800 },
801};
802
803static struct platform_device spu1_device = {
804 .name = "uio_pdrv_genirq",
805 .id = 7,
806 .dev = {
807 .platform_data = &spu1_platform_data,
808 },
809 .resource = spu1_resources,
810 .num_resources = ARRAY_SIZE(spu1_resources),
811};
812
604static struct platform_device *sh7372_early_devices[] __initdata = { 813static struct platform_device *sh7372_early_devices[] __initdata = {
605 &scif0_device, 814 &scif0_device,
606 &scif1_device, 815 &scif1_device,
@@ -620,6 +829,14 @@ static struct platform_device *sh7372_late_devices[] __initdata = {
620 &dma0_device, 829 &dma0_device,
621 &dma1_device, 830 &dma1_device,
622 &dma2_device, 831 &dma2_device,
832 &vpu_device,
833 &veu0_device,
834 &veu1_device,
835 &veu2_device,
836 &veu3_device,
837 &jpu_device,
838 &spu0_device,
839 &spu1_device,
623}; 840};
624 841
625void __init sh7372_add_standard_devices(void) 842void __init sh7372_add_standard_devices(void)
diff --git a/arch/arm/mach-shmobile/setup-sh7377.c b/arch/arm/mach-shmobile/setup-sh7377.c
index 8099b0b8a934..bb405b8e459b 100644
--- a/arch/arm/mach-shmobile/setup-sh7377.c
+++ b/arch/arm/mach-shmobile/setup-sh7377.c
@@ -22,6 +22,7 @@
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/irq.h> 23#include <linux/irq.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/uio_driver.h>
25#include <linux/delay.h> 26#include <linux/delay.h>
26#include <linux/input.h> 27#include <linux/input.h>
27#include <linux/io.h> 28#include <linux/io.h>
@@ -38,7 +39,7 @@ static struct plat_sci_port scif0_platform_data = {
38 .flags = UPF_BOOT_AUTOCONF, 39 .flags = UPF_BOOT_AUTOCONF,
39 .scscr = SCSCR_RE | SCSCR_TE, 40 .scscr = SCSCR_RE | SCSCR_TE,
40 .scbrr_algo_id = SCBRR_ALGO_4, 41 .scbrr_algo_id = SCBRR_ALGO_4,
41 .type = PORT_SCIF, 42 .type = PORT_SCIFA,
42 .irqs = { evt2irq(0xc00), evt2irq(0xc00), 43 .irqs = { evt2irq(0xc00), evt2irq(0xc00),
43 evt2irq(0xc00), evt2irq(0xc00) }, 44 evt2irq(0xc00), evt2irq(0xc00) },
44}; 45};
@@ -57,7 +58,7 @@ static struct plat_sci_port scif1_platform_data = {
57 .flags = UPF_BOOT_AUTOCONF, 58 .flags = UPF_BOOT_AUTOCONF,
58 .scscr = SCSCR_RE | SCSCR_TE, 59 .scscr = SCSCR_RE | SCSCR_TE,
59 .scbrr_algo_id = SCBRR_ALGO_4, 60 .scbrr_algo_id = SCBRR_ALGO_4,
60 .type = PORT_SCIF, 61 .type = PORT_SCIFA,
61 .irqs = { evt2irq(0xc20), evt2irq(0xc20), 62 .irqs = { evt2irq(0xc20), evt2irq(0xc20),
62 evt2irq(0xc20), evt2irq(0xc20) }, 63 evt2irq(0xc20), evt2irq(0xc20) },
63}; 64};
@@ -76,7 +77,7 @@ static struct plat_sci_port scif2_platform_data = {
76 .flags = UPF_BOOT_AUTOCONF, 77 .flags = UPF_BOOT_AUTOCONF,
77 .scscr = SCSCR_RE | SCSCR_TE, 78 .scscr = SCSCR_RE | SCSCR_TE,
78 .scbrr_algo_id = SCBRR_ALGO_4, 79 .scbrr_algo_id = SCBRR_ALGO_4,
79 .type = PORT_SCIF, 80 .type = PORT_SCIFA,
80 .irqs = { evt2irq(0xc40), evt2irq(0xc40), 81 .irqs = { evt2irq(0xc40), evt2irq(0xc40),
81 evt2irq(0xc40), evt2irq(0xc40) }, 82 evt2irq(0xc40), evt2irq(0xc40) },
82}; 83};
@@ -95,7 +96,7 @@ static struct plat_sci_port scif3_platform_data = {
95 .flags = UPF_BOOT_AUTOCONF, 96 .flags = UPF_BOOT_AUTOCONF,
96 .scscr = SCSCR_RE | SCSCR_TE, 97 .scscr = SCSCR_RE | SCSCR_TE,
97 .scbrr_algo_id = SCBRR_ALGO_4, 98 .scbrr_algo_id = SCBRR_ALGO_4,
98 .type = PORT_SCIF, 99 .type = PORT_SCIFA,
99 .irqs = { evt2irq(0xc60), evt2irq(0xc60), 100 .irqs = { evt2irq(0xc60), evt2irq(0xc60),
100 evt2irq(0xc60), evt2irq(0xc60) }, 101 evt2irq(0xc60), evt2irq(0xc60) },
101}; 102};
@@ -114,7 +115,7 @@ static struct plat_sci_port scif4_platform_data = {
114 .flags = UPF_BOOT_AUTOCONF, 115 .flags = UPF_BOOT_AUTOCONF,
115 .scscr = SCSCR_RE | SCSCR_TE, 116 .scscr = SCSCR_RE | SCSCR_TE,
116 .scbrr_algo_id = SCBRR_ALGO_4, 117 .scbrr_algo_id = SCBRR_ALGO_4,
117 .type = PORT_SCIF, 118 .type = PORT_SCIFA,
118 .irqs = { evt2irq(0xd20), evt2irq(0xd20), 119 .irqs = { evt2irq(0xd20), evt2irq(0xd20),
119 evt2irq(0xd20), evt2irq(0xd20) }, 120 evt2irq(0xd20), evt2irq(0xd20) },
120}; 121};
@@ -133,7 +134,7 @@ static struct plat_sci_port scif5_platform_data = {
133 .flags = UPF_BOOT_AUTOCONF, 134 .flags = UPF_BOOT_AUTOCONF,
134 .scscr = SCSCR_RE | SCSCR_TE, 135 .scscr = SCSCR_RE | SCSCR_TE,
135 .scbrr_algo_id = SCBRR_ALGO_4, 136 .scbrr_algo_id = SCBRR_ALGO_4,
136 .type = PORT_SCIF, 137 .type = PORT_SCIFA,
137 .irqs = { evt2irq(0xd40), evt2irq(0xd40), 138 .irqs = { evt2irq(0xd40), evt2irq(0xd40),
138 evt2irq(0xd40), evt2irq(0xd40) }, 139 evt2irq(0xd40), evt2irq(0xd40) },
139}; 140};
@@ -152,7 +153,7 @@ static struct plat_sci_port scif6_platform_data = {
152 .flags = UPF_BOOT_AUTOCONF, 153 .flags = UPF_BOOT_AUTOCONF,
153 .scscr = SCSCR_RE | SCSCR_TE, 154 .scscr = SCSCR_RE | SCSCR_TE,
154 .scbrr_algo_id = SCBRR_ALGO_4, 155 .scbrr_algo_id = SCBRR_ALGO_4,
155 .type = PORT_SCIF, 156 .type = PORT_SCIFA,
156 .irqs = { intcs_evt2irq(0x1a80), intcs_evt2irq(0x1a80), 157 .irqs = { intcs_evt2irq(0x1a80), intcs_evt2irq(0x1a80),
157 intcs_evt2irq(0x1a80), intcs_evt2irq(0x1a80) }, 158 intcs_evt2irq(0x1a80), intcs_evt2irq(0x1a80) },
158}; 159};
@@ -171,7 +172,7 @@ static struct plat_sci_port scif7_platform_data = {
171 .flags = UPF_BOOT_AUTOCONF, 172 .flags = UPF_BOOT_AUTOCONF,
172 .scscr = SCSCR_RE | SCSCR_TE, 173 .scscr = SCSCR_RE | SCSCR_TE,
173 .scbrr_algo_id = SCBRR_ALGO_4, 174 .scbrr_algo_id = SCBRR_ALGO_4,
174 .type = PORT_SCIF, 175 .type = PORT_SCIFB,
175 .irqs = { evt2irq(0xd60), evt2irq(0xd60), 176 .irqs = { evt2irq(0xd60), evt2irq(0xd60),
176 evt2irq(0xd60), evt2irq(0xd60) }, 177 evt2irq(0xd60), evt2irq(0xd60) },
177}; 178};
@@ -215,6 +216,214 @@ static struct platform_device cmt10_device = {
215 .num_resources = ARRAY_SIZE(cmt10_resources), 216 .num_resources = ARRAY_SIZE(cmt10_resources),
216}; 217};
217 218
219/* VPU */
220static struct uio_info vpu_platform_data = {
221 .name = "VPU5HG",
222 .version = "0",
223 .irq = intcs_evt2irq(0x980),
224};
225
226static struct resource vpu_resources[] = {
227 [0] = {
228 .name = "VPU",
229 .start = 0xfe900000,
230 .end = 0xfe900157,
231 .flags = IORESOURCE_MEM,
232 },
233};
234
235static struct platform_device vpu_device = {
236 .name = "uio_pdrv_genirq",
237 .id = 0,
238 .dev = {
239 .platform_data = &vpu_platform_data,
240 },
241 .resource = vpu_resources,
242 .num_resources = ARRAY_SIZE(vpu_resources),
243};
244
245/* VEU0 */
246static struct uio_info veu0_platform_data = {
247 .name = "VEU0",
248 .version = "0",
249 .irq = intcs_evt2irq(0x700),
250};
251
252static struct resource veu0_resources[] = {
253 [0] = {
254 .name = "VEU0",
255 .start = 0xfe920000,
256 .end = 0xfe9200cb,
257 .flags = IORESOURCE_MEM,
258 },
259};
260
261static struct platform_device veu0_device = {
262 .name = "uio_pdrv_genirq",
263 .id = 1,
264 .dev = {
265 .platform_data = &veu0_platform_data,
266 },
267 .resource = veu0_resources,
268 .num_resources = ARRAY_SIZE(veu0_resources),
269};
270
271/* VEU1 */
272static struct uio_info veu1_platform_data = {
273 .name = "VEU1",
274 .version = "0",
275 .irq = intcs_evt2irq(0x720),
276};
277
278static struct resource veu1_resources[] = {
279 [0] = {
280 .name = "VEU1",
281 .start = 0xfe924000,
282 .end = 0xfe9240cb,
283 .flags = IORESOURCE_MEM,
284 },
285};
286
287static struct platform_device veu1_device = {
288 .name = "uio_pdrv_genirq",
289 .id = 2,
290 .dev = {
291 .platform_data = &veu1_platform_data,
292 },
293 .resource = veu1_resources,
294 .num_resources = ARRAY_SIZE(veu1_resources),
295};
296
297/* VEU2 */
298static struct uio_info veu2_platform_data = {
299 .name = "VEU2",
300 .version = "0",
301 .irq = intcs_evt2irq(0x740),
302};
303
304static struct resource veu2_resources[] = {
305 [0] = {
306 .name = "VEU2",
307 .start = 0xfe928000,
308 .end = 0xfe928307,
309 .flags = IORESOURCE_MEM,
310 },
311};
312
313static struct platform_device veu2_device = {
314 .name = "uio_pdrv_genirq",
315 .id = 3,
316 .dev = {
317 .platform_data = &veu2_platform_data,
318 },
319 .resource = veu2_resources,
320 .num_resources = ARRAY_SIZE(veu2_resources),
321};
322
323/* VEU3 */
324static struct uio_info veu3_platform_data = {
325 .name = "VEU3",
326 .version = "0",
327 .irq = intcs_evt2irq(0x760),
328};
329
330static struct resource veu3_resources[] = {
331 [0] = {
332 .name = "VEU3",
333 .start = 0xfe92c000,
334 .end = 0xfe92c307,
335 .flags = IORESOURCE_MEM,
336 },
337};
338
339static struct platform_device veu3_device = {
340 .name = "uio_pdrv_genirq",
341 .id = 4,
342 .dev = {
343 .platform_data = &veu3_platform_data,
344 },
345 .resource = veu3_resources,
346 .num_resources = ARRAY_SIZE(veu3_resources),
347};
348
349/* JPU */
350static struct uio_info jpu_platform_data = {
351 .name = "JPU",
352 .version = "0",
353 .irq = intcs_evt2irq(0x560),
354};
355
356static struct resource jpu_resources[] = {
357 [0] = {
358 .name = "JPU",
359 .start = 0xfe980000,
360 .end = 0xfe9902d3,
361 .flags = IORESOURCE_MEM,
362 },
363};
364
365static struct platform_device jpu_device = {
366 .name = "uio_pdrv_genirq",
367 .id = 5,
368 .dev = {
369 .platform_data = &jpu_platform_data,
370 },
371 .resource = jpu_resources,
372 .num_resources = ARRAY_SIZE(jpu_resources),
373};
374
375/* SPU2DSP0 */
376static struct uio_info spu0_platform_data = {
377 .name = "SPU2DSP0",
378 .version = "0",
379 .irq = evt2irq(0x1800),
380};
381
382static struct resource spu0_resources[] = {
383 [0] = {
384 .name = "SPU2DSP0",
385 .start = 0xfe200000,
386 .end = 0xfe2fffff,
387 .flags = IORESOURCE_MEM,
388 },
389};
390
391static struct platform_device spu0_device = {
392 .name = "uio_pdrv_genirq",
393 .id = 6,
394 .dev = {
395 .platform_data = &spu0_platform_data,
396 },
397 .resource = spu0_resources,
398 .num_resources = ARRAY_SIZE(spu0_resources),
399};
400
401/* SPU2DSP1 */
402static struct uio_info spu1_platform_data = {
403 .name = "SPU2DSP1",
404 .version = "0",
405 .irq = evt2irq(0x1820),
406};
407
408static struct resource spu1_resources[] = {
409 [0] = {
410 .name = "SPU2DSP1",
411 .start = 0xfe300000,
412 .end = 0xfe3fffff,
413 .flags = IORESOURCE_MEM,
414 },
415};
416
417static struct platform_device spu1_device = {
418 .name = "uio_pdrv_genirq",
419 .id = 7,
420 .dev = {
421 .platform_data = &spu1_platform_data,
422 },
423 .resource = spu1_resources,
424 .num_resources = ARRAY_SIZE(spu1_resources),
425};
426
218static struct platform_device *sh7377_early_devices[] __initdata = { 427static struct platform_device *sh7377_early_devices[] __initdata = {
219 &scif0_device, 428 &scif0_device,
220 &scif1_device, 429 &scif1_device,
@@ -227,10 +436,24 @@ static struct platform_device *sh7377_early_devices[] __initdata = {
227 &cmt10_device, 436 &cmt10_device,
228}; 437};
229 438
439static struct platform_device *sh7377_devices[] __initdata = {
440 &vpu_device,
441 &veu0_device,
442 &veu1_device,
443 &veu2_device,
444 &veu3_device,
445 &jpu_device,
446 &spu0_device,
447 &spu1_device,
448};
449
230void __init sh7377_add_standard_devices(void) 450void __init sh7377_add_standard_devices(void)
231{ 451{
232 platform_add_devices(sh7377_early_devices, 452 platform_add_devices(sh7377_early_devices,
233 ARRAY_SIZE(sh7377_early_devices)); 453 ARRAY_SIZE(sh7377_early_devices));
454
455 platform_add_devices(sh7377_devices,
456 ARRAY_SIZE(sh7377_devices));
234} 457}
235 458
236#define SMSTPCR3 0xe615013c 459#define SMSTPCR3 0xe615013c
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index 685c40a2f5e6..e46821c0a62e 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -27,9 +27,11 @@
27#include <linux/input.h> 27#include <linux/input.h>
28#include <linux/io.h> 28#include <linux/io.h>
29#include <linux/serial_sci.h> 29#include <linux/serial_sci.h>
30#include <linux/sh_dma.h>
30#include <linux/sh_intc.h> 31#include <linux/sh_intc.h>
31#include <linux/sh_timer.h> 32#include <linux/sh_timer.h>
32#include <mach/hardware.h> 33#include <mach/hardware.h>
34#include <mach/sh73a0.h>
33#include <asm/mach-types.h> 35#include <asm/mach-types.h>
34#include <asm/mach/arch.h> 36#include <asm/mach/arch.h>
35 37
@@ -392,6 +394,242 @@ static struct platform_device i2c4_device = {
392 .num_resources = ARRAY_SIZE(i2c4_resources), 394 .num_resources = ARRAY_SIZE(i2c4_resources),
393}; 395};
394 396
397/* Transmit sizes and respective CHCR register values */
398enum {
399 XMIT_SZ_8BIT = 0,
400 XMIT_SZ_16BIT = 1,
401 XMIT_SZ_32BIT = 2,
402 XMIT_SZ_64BIT = 7,
403 XMIT_SZ_128BIT = 3,
404 XMIT_SZ_256BIT = 4,
405 XMIT_SZ_512BIT = 5,
406};
407
408/* log2(size / 8) - used to calculate number of transfers */
409#define TS_SHIFT { \
410 [XMIT_SZ_8BIT] = 0, \
411 [XMIT_SZ_16BIT] = 1, \
412 [XMIT_SZ_32BIT] = 2, \
413 [XMIT_SZ_64BIT] = 3, \
414 [XMIT_SZ_128BIT] = 4, \
415 [XMIT_SZ_256BIT] = 5, \
416 [XMIT_SZ_512BIT] = 6, \
417}
418
419#define TS_INDEX2VAL(i) ((((i) & 3) << 3) | (((i) & 0xc) << (20 - 2)))
420#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL((xmit_sz)))
421#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL((xmit_sz)))
422
423static const struct sh_dmae_slave_config sh73a0_dmae_slaves[] = {
424 {
425 .slave_id = SHDMA_SLAVE_SCIF0_TX,
426 .addr = 0xe6c40020,
427 .chcr = CHCR_TX(XMIT_SZ_8BIT),
428 .mid_rid = 0x21,
429 }, {
430 .slave_id = SHDMA_SLAVE_SCIF0_RX,
431 .addr = 0xe6c40024,
432 .chcr = CHCR_RX(XMIT_SZ_8BIT),
433 .mid_rid = 0x22,
434 }, {
435 .slave_id = SHDMA_SLAVE_SCIF1_TX,
436 .addr = 0xe6c50020,
437 .chcr = CHCR_TX(XMIT_SZ_8BIT),
438 .mid_rid = 0x25,
439 }, {
440 .slave_id = SHDMA_SLAVE_SCIF1_RX,
441 .addr = 0xe6c50024,
442 .chcr = CHCR_RX(XMIT_SZ_8BIT),
443 .mid_rid = 0x26,
444 }, {
445 .slave_id = SHDMA_SLAVE_SCIF2_TX,
446 .addr = 0xe6c60020,
447 .chcr = CHCR_TX(XMIT_SZ_8BIT),
448 .mid_rid = 0x29,
449 }, {
450 .slave_id = SHDMA_SLAVE_SCIF2_RX,
451 .addr = 0xe6c60024,
452 .chcr = CHCR_RX(XMIT_SZ_8BIT),
453 .mid_rid = 0x2a,
454 }, {
455 .slave_id = SHDMA_SLAVE_SCIF3_TX,
456 .addr = 0xe6c70020,
457 .chcr = CHCR_TX(XMIT_SZ_8BIT),
458 .mid_rid = 0x2d,
459 }, {
460 .slave_id = SHDMA_SLAVE_SCIF3_RX,
461 .addr = 0xe6c70024,
462 .chcr = CHCR_RX(XMIT_SZ_8BIT),
463 .mid_rid = 0x2e,
464 }, {
465 .slave_id = SHDMA_SLAVE_SCIF4_TX,
466 .addr = 0xe6c80020,
467 .chcr = CHCR_TX(XMIT_SZ_8BIT),
468 .mid_rid = 0x39,
469 }, {
470 .slave_id = SHDMA_SLAVE_SCIF4_RX,
471 .addr = 0xe6c80024,
472 .chcr = CHCR_RX(XMIT_SZ_8BIT),
473 .mid_rid = 0x3a,
474 }, {
475 .slave_id = SHDMA_SLAVE_SCIF5_TX,
476 .addr = 0xe6cb0020,
477 .chcr = CHCR_TX(XMIT_SZ_8BIT),
478 .mid_rid = 0x35,
479 }, {
480 .slave_id = SHDMA_SLAVE_SCIF5_RX,
481 .addr = 0xe6cb0024,
482 .chcr = CHCR_RX(XMIT_SZ_8BIT),
483 .mid_rid = 0x36,
484 }, {
485 .slave_id = SHDMA_SLAVE_SCIF6_TX,
486 .addr = 0xe6cc0020,
487 .chcr = CHCR_TX(XMIT_SZ_8BIT),
488 .mid_rid = 0x1d,
489 }, {
490 .slave_id = SHDMA_SLAVE_SCIF6_RX,
491 .addr = 0xe6cc0024,
492 .chcr = CHCR_RX(XMIT_SZ_8BIT),
493 .mid_rid = 0x1e,
494 }, {
495 .slave_id = SHDMA_SLAVE_SCIF7_TX,
496 .addr = 0xe6cd0020,
497 .chcr = CHCR_TX(XMIT_SZ_8BIT),
498 .mid_rid = 0x19,
499 }, {
500 .slave_id = SHDMA_SLAVE_SCIF7_RX,
501 .addr = 0xe6cd0024,
502 .chcr = CHCR_RX(XMIT_SZ_8BIT),
503 .mid_rid = 0x1a,
504 }, {
505 .slave_id = SHDMA_SLAVE_SCIF8_TX,
506 .addr = 0xe6c30040,
507 .chcr = CHCR_TX(XMIT_SZ_8BIT),
508 .mid_rid = 0x3d,
509 }, {
510 .slave_id = SHDMA_SLAVE_SCIF8_RX,
511 .addr = 0xe6c30060,
512 .chcr = CHCR_RX(XMIT_SZ_8BIT),
513 .mid_rid = 0x3e,
514 }, {
515 .slave_id = SHDMA_SLAVE_SDHI0_TX,
516 .addr = 0xee100030,
517 .chcr = CHCR_TX(XMIT_SZ_16BIT),
518 .mid_rid = 0xc1,
519 }, {
520 .slave_id = SHDMA_SLAVE_SDHI0_RX,
521 .addr = 0xee100030,
522 .chcr = CHCR_RX(XMIT_SZ_16BIT),
523 .mid_rid = 0xc2,
524 }, {
525 .slave_id = SHDMA_SLAVE_SDHI1_TX,
526 .addr = 0xee120030,
527 .chcr = CHCR_TX(XMIT_SZ_16BIT),
528 .mid_rid = 0xc9,
529 }, {
530 .slave_id = SHDMA_SLAVE_SDHI1_RX,
531 .addr = 0xee120030,
532 .chcr = CHCR_RX(XMIT_SZ_16BIT),
533 .mid_rid = 0xca,
534 }, {
535 .slave_id = SHDMA_SLAVE_SDHI2_TX,
536 .addr = 0xee140030,
537 .chcr = CHCR_TX(XMIT_SZ_16BIT),
538 .mid_rid = 0xcd,
539 }, {
540 .slave_id = SHDMA_SLAVE_SDHI2_RX,
541 .addr = 0xee140030,
542 .chcr = CHCR_RX(XMIT_SZ_16BIT),
543 .mid_rid = 0xce,
544 }, {
545 .slave_id = SHDMA_SLAVE_MMCIF_TX,
546 .addr = 0xe6bd0034,
547 .chcr = CHCR_TX(XMIT_SZ_32BIT),
548 .mid_rid = 0xd1,
549 }, {
550 .slave_id = SHDMA_SLAVE_MMCIF_RX,
551 .addr = 0xe6bd0034,
552 .chcr = CHCR_RX(XMIT_SZ_32BIT),
553 .mid_rid = 0xd2,
554 },
555};
556
557#define DMAE_CHANNEL(_offset) \
558 { \
559 .offset = _offset - 0x20, \
560 .dmars = _offset - 0x20 + 0x40, \
561 }
562
563static const struct sh_dmae_channel sh73a0_dmae_channels[] = {
564 DMAE_CHANNEL(0x8000),
565 DMAE_CHANNEL(0x8080),
566 DMAE_CHANNEL(0x8100),
567 DMAE_CHANNEL(0x8180),
568 DMAE_CHANNEL(0x8200),
569 DMAE_CHANNEL(0x8280),
570 DMAE_CHANNEL(0x8300),
571 DMAE_CHANNEL(0x8380),
572 DMAE_CHANNEL(0x8400),
573 DMAE_CHANNEL(0x8480),
574 DMAE_CHANNEL(0x8500),
575 DMAE_CHANNEL(0x8580),
576 DMAE_CHANNEL(0x8600),
577 DMAE_CHANNEL(0x8680),
578 DMAE_CHANNEL(0x8700),
579 DMAE_CHANNEL(0x8780),
580 DMAE_CHANNEL(0x8800),
581 DMAE_CHANNEL(0x8880),
582 DMAE_CHANNEL(0x8900),
583 DMAE_CHANNEL(0x8980),
584};
585
586static const unsigned int ts_shift[] = TS_SHIFT;
587
588static struct sh_dmae_pdata sh73a0_dmae_platform_data = {
589 .slave = sh73a0_dmae_slaves,
590 .slave_num = ARRAY_SIZE(sh73a0_dmae_slaves),
591 .channel = sh73a0_dmae_channels,
592 .channel_num = ARRAY_SIZE(sh73a0_dmae_channels),
593 .ts_low_shift = 3,
594 .ts_low_mask = 0x18,
595 .ts_high_shift = (20 - 2), /* 2 bits for shifted low TS */
596 .ts_high_mask = 0x00300000,
597 .ts_shift = ts_shift,
598 .ts_shift_num = ARRAY_SIZE(ts_shift),
599 .dmaor_init = DMAOR_DME,
600};
601
602static struct resource sh73a0_dmae_resources[] = {
603 {
604 /* Registers including DMAOR and channels including DMARSx */
605 .start = 0xfe000020,
606 .end = 0xfe008a00 - 1,
607 .flags = IORESOURCE_MEM,
608 },
609 {
610 /* DMA error IRQ */
611 .start = gic_spi(129),
612 .end = gic_spi(129),
613 .flags = IORESOURCE_IRQ,
614 },
615 {
616 /* IRQ for channels 0-19 */
617 .start = gic_spi(109),
618 .end = gic_spi(128),
619 .flags = IORESOURCE_IRQ,
620 },
621};
622
623static struct platform_device dma0_device = {
624 .name = "sh-dma-engine",
625 .id = 0,
626 .resource = sh73a0_dmae_resources,
627 .num_resources = ARRAY_SIZE(sh73a0_dmae_resources),
628 .dev = {
629 .platform_data = &sh73a0_dmae_platform_data,
630 },
631};
632
395static struct platform_device *sh73a0_early_devices[] __initdata = { 633static struct platform_device *sh73a0_early_devices[] __initdata = {
396 &scif0_device, 634 &scif0_device,
397 &scif1_device, 635 &scif1_device,
@@ -413,10 +651,16 @@ static struct platform_device *sh73a0_late_devices[] __initdata = {
413 &i2c2_device, 651 &i2c2_device,
414 &i2c3_device, 652 &i2c3_device,
415 &i2c4_device, 653 &i2c4_device,
654 &dma0_device,
416}; 655};
417 656
657#define SRCR2 0xe61580b0
658
418void __init sh73a0_add_standard_devices(void) 659void __init sh73a0_add_standard_devices(void)
419{ 660{
661 /* Clear software reset bit on SY-DMAC module */
662 __raw_writel(__raw_readl(SRCR2) & ~(1 << 18), SRCR2);
663
420 platform_add_devices(sh73a0_early_devices, 664 platform_add_devices(sh73a0_early_devices,
421 ARRAY_SIZE(sh73a0_early_devices)); 665 ARRAY_SIZE(sh73a0_early_devices));
422 platform_add_devices(sh73a0_late_devices, 666 platform_add_devices(sh73a0_late_devices,
diff --git a/arch/arm/mach-shmobile/sleep-sh7372.S b/arch/arm/mach-shmobile/sleep-sh7372.S
new file mode 100644
index 000000000000..d37d3ca4d18f
--- /dev/null
+++ b/arch/arm/mach-shmobile/sleep-sh7372.S
@@ -0,0 +1,260 @@
1/*
2 * sh7372 lowlevel sleep code for "Core Standby Mode"
3 *
4 * Copyright (C) 2011 Magnus Damm
5 *
6 * In "Core Standby Mode" the ARM core is off, but L2 cache is still on
7 *
8 * Based on mach-omap2/sleep34xx.S
9 *
10 * (C) Copyright 2007 Texas Instruments
11 * Karthik Dasu <karthik-dp@ti.com>
12 *
13 * (C) Copyright 2004 Texas Instruments, <www.ti.com>
14 * Richard Woodruff <r-woodruff2@ti.com>
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License as
18 * published by the Free Software Foundation; either version 2 of
19 * the License, or (at your option) any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR /PURPOSE. See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
29 * MA 02111-1307 USA
30 */
31
32#include <linux/linkage.h>
33#include <asm/assembler.h>
34
35#define SMFRAM 0xe6a70000
36
37 .align
38kernel_flush:
39 .word v7_flush_dcache_all
40
41 .align 3
42ENTRY(sh7372_cpu_suspend)
43 stmfd sp!, {r0-r12, lr} @ save registers on stack
44
45 ldr r8, =SMFRAM
46
47 mov r4, sp @ Store sp
48 mrs r5, spsr @ Store spsr
49 mov r6, lr @ Store lr
50 stmia r8!, {r4-r6}
51
52 mrc p15, 0, r4, c1, c0, 2 @ Coprocessor access control register
53 mrc p15, 0, r5, c2, c0, 0 @ TTBR0
54 mrc p15, 0, r6, c2, c0, 1 @ TTBR1
55 mrc p15, 0, r7, c2, c0, 2 @ TTBCR
56 stmia r8!, {r4-r7}
57
58 mrc p15, 0, r4, c3, c0, 0 @ Domain access Control Register
59 mrc p15, 0, r5, c10, c2, 0 @ PRRR
60 mrc p15, 0, r6, c10, c2, 1 @ NMRR
61 stmia r8!,{r4-r6}
62
63 mrc p15, 0, r4, c13, c0, 1 @ Context ID
64 mrc p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
65 mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
66 mrs r7, cpsr @ Store current cpsr
67 stmia r8!, {r4-r7}
68
69 mrc p15, 0, r4, c1, c0, 0 @ save control register
70 stmia r8!, {r4}
71
72 /*
73 * jump out to kernel flush routine
74 * - reuse that code is better
75 * - it executes in a cached space so is faster than refetch per-block
76 * - should be faster and will change with kernel
77 * - 'might' have to copy address, load and jump to it
78 * Flush all data from the L1 data cache before disabling
79 * SCTLR.C bit.
80 */
81 ldr r1, kernel_flush
82 mov lr, pc
83 bx r1
84
85 /*
86 * Clear the SCTLR.C bit to prevent further data cache
87 * allocation. Clearing SCTLR.C would make all the data accesses
88 * strongly ordered and would not hit the cache.
89 */
90 mrc p15, 0, r0, c1, c0, 0
91 bic r0, r0, #(1 << 2) @ Disable the C bit
92 mcr p15, 0, r0, c1, c0, 0
93 isb
94
95 /*
96 * Invalidate L1 data cache. Even though only invalidate is
97 * necessary exported flush API is used here. Doing clean
98 * on already clean cache would be almost NOP.
99 */
100 ldr r1, kernel_flush
101 blx r1
102 /*
103 * The kernel doesn't interwork: v7_flush_dcache_all in particluar will
104 * always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled.
105 * This sequence switches back to ARM. Note that .align may insert a
106 * nop: bx pc needs to be word-aligned in order to work.
107 */
108 THUMB( .thumb )
109 THUMB( .align )
110 THUMB( bx pc )
111 THUMB( nop )
112 .arm
113
114 /* Data memory barrier and Data sync barrier */
115 dsb
116 dmb
117
118/*
119 * ===================================
120 * == WFI instruction => Enter idle ==
121 * ===================================
122 */
123 wfi @ wait for interrupt
124
125/*
126 * ===================================
127 * == Resume path for non-OFF modes ==
128 * ===================================
129 */
130 mrc p15, 0, r0, c1, c0, 0
131 tst r0, #(1 << 2) @ Check C bit enabled?
132 orreq r0, r0, #(1 << 2) @ Enable the C bit if cleared
133 mcreq p15, 0, r0, c1, c0, 0
134 isb
135
136/*
137 * ===================================
138 * == Exit point from non-OFF modes ==
139 * ===================================
140 */
141 ldmfd sp!, {r0-r12, pc} @ restore regs and return
142
143 .pool
144
145 .align 12
146 .text
147 .global sh7372_cpu_resume
148sh7372_cpu_resume:
149
150 mov r1, #0
151 /*
152 * Invalidate all instruction caches to PoU
153 * and flush branch target cache
154 */
155 mcr p15, 0, r1, c7, c5, 0
156
157 ldr r3, =SMFRAM
158
159 ldmia r3!, {r4-r6}
160 mov sp, r4 @ Restore sp
161 msr spsr_cxsf, r5 @ Restore spsr
162 mov lr, r6 @ Restore lr
163
164 ldmia r3!, {r4-r7}
165 mcr p15, 0, r4, c1, c0, 2 @ Coprocessor access Control Register
166 mcr p15, 0, r5, c2, c0, 0 @ TTBR0
167 mcr p15, 0, r6, c2, c0, 1 @ TTBR1
168 mcr p15, 0, r7, c2, c0, 2 @ TTBCR
169
170 ldmia r3!,{r4-r6}
171 mcr p15, 0, r4, c3, c0, 0 @ Domain access Control Register
172 mcr p15, 0, r5, c10, c2, 0 @ PRRR
173 mcr p15, 0, r6, c10, c2, 1 @ NMRR
174
175 ldmia r3!,{r4-r7}
176 mcr p15, 0, r4, c13, c0, 1 @ Context ID
177 mcr p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
178 mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
179 msr cpsr, r7 @ store cpsr
180
181 /* Starting to enable MMU here */
182 mrc p15, 0, r7, c2, c0, 2 @ Read TTBRControl
183 /* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
184 and r7, #0x7
185 cmp r7, #0x0
186 beq usettbr0
187ttbr_error:
188 /*
189 * More work needs to be done to support N[0:2] value other than 0
190 * So looping here so that the error can be detected
191 */
192 b ttbr_error
193
194 .align
195cache_pred_disable_mask:
196 .word 0xFFFFE7FB
197ttbrbit_mask:
198 .word 0xFFFFC000
199table_index_mask:
200 .word 0xFFF00000
201table_entry:
202 .word 0x00000C02
203usettbr0:
204
205 mrc p15, 0, r2, c2, c0, 0
206 ldr r5, ttbrbit_mask
207 and r2, r5
208 mov r4, pc
209 ldr r5, table_index_mask
210 and r4, r5 @ r4 = 31 to 20 bits of pc
211 /* Extract the value to be written to table entry */
212 ldr r6, table_entry
213 /* r6 has the value to be written to table entry */
214 add r6, r6, r4
215 /* Getting the address of table entry to modify */
216 lsr r4, #18
217 /* r2 has the location which needs to be modified */
218 add r2, r4
219 ldr r4, [r2]
220 str r6, [r2] /* modify the table entry */
221
222 mov r7, r6
223 mov r5, r2
224 mov r6, r4
225 /* r5 = original page table address */
226 /* r6 = original page table data */
227
228 mov r0, #0
229 mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer
230 mcr p15, 0, r0, c7, c5, 6 @ Invalidate branch predictor array
231 mcr p15, 0, r0, c8, c5, 0 @ Invalidate instruction TLB
232 mcr p15, 0, r0, c8, c6, 0 @ Invalidate data TLB
233
234 /*
235 * Restore control register. This enables the MMU.
236 * The caches and prediction are not enabled here, they
237 * will be enabled after restoring the MMU table entry.
238 */
239 ldmia r3!, {r4}
240 stmia r3!, {r5} /* save original page table address */
241 stmia r3!, {r6} /* save original page table data */
242 stmia r3!, {r7} /* save modified page table data */
243
244 ldr r2, cache_pred_disable_mask
245 and r4, r2
246 mcr p15, 0, r4, c1, c0, 0
247 dsb
248 isb
249
250 ldr r0, =restoremmu_on
251 bx r0
252
253/*
254 * ==============================
255 * == Exit point from OFF mode ==
256 * ==============================
257 */
258restoremmu_on:
259
260 ldmfd sp!, {r0-r12, pc} @ restore regs and return
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index a156d2108df1..3ffdbc92ba82 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -59,6 +59,11 @@ unsigned int __init sh73a0_get_core_count(void)
59{ 59{
60 void __iomem *scu_base = scu_base_addr(); 60 void __iomem *scu_base = scu_base_addr();
61 61
62#ifdef CONFIG_HAVE_ARM_TWD
63 /* twd_base needs to be initialized before percpu_timer_setup() */
64 twd_base = (void __iomem *)0xf0000600;
65#endif
66
62 return scu_get_core_count(scu_base); 67 return scu_get_core_count(scu_base);
63} 68}
64 69
@@ -82,10 +87,6 @@ int __cpuinit sh73a0_boot_secondary(unsigned int cpu)
82 87
83void __init sh73a0_smp_prepare_cpus(void) 88void __init sh73a0_smp_prepare_cpus(void)
84{ 89{
85#ifdef CONFIG_HAVE_ARM_TWD
86 twd_base = (void __iomem *)0xf0000600;
87#endif
88
89 scu_enable(scu_base_addr()); 90 scu_enable(scu_base_addr());
90 91
91 /* Map the reset vector (in headsmp.S) */ 92 /* Map the reset vector (in headsmp.S) */
diff --git a/arch/arm/mach-shmobile/suspend.c b/arch/arm/mach-shmobile/suspend.c
new file mode 100644
index 000000000000..c1febe13f709
--- /dev/null
+++ b/arch/arm/mach-shmobile/suspend.c
@@ -0,0 +1,47 @@
1/*
2 * Suspend-to-RAM support code for SH-Mobile ARM
3 *
4 * Copyright (C) 2011 Magnus Damm
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/pm.h>
12#include <linux/suspend.h>
13#include <linux/module.h>
14#include <linux/err.h>
15#include <asm/system.h>
16#include <asm/io.h>
17
18static int shmobile_suspend_default_enter(suspend_state_t suspend_state)
19{
20 cpu_do_idle();
21 return 0;
22}
23
24static int shmobile_suspend_begin(suspend_state_t state)
25{
26 disable_hlt();
27 return 0;
28}
29
30static void shmobile_suspend_end(void)
31{
32 enable_hlt();
33}
34
35struct platform_suspend_ops shmobile_suspend_ops = {
36 .begin = shmobile_suspend_begin,
37 .end = shmobile_suspend_end,
38 .enter = shmobile_suspend_default_enter,
39 .valid = suspend_valid_only_mem,
40};
41
42static int __init shmobile_suspend_init(void)
43{
44 suspend_set_ops(&shmobile_suspend_ops);
45 return 0;
46}
47late_initcall(shmobile_suspend_init);
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 58626013aa32..54429d015954 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -12,9 +12,12 @@ menu "Ux500 SoC"
12 12
13config UX500_SOC_DB5500 13config UX500_SOC_DB5500
14 bool "DB5500" 14 bool "DB5500"
15 select MFD_DB5500_PRCMU
15 16
16config UX500_SOC_DB8500 17config UX500_SOC_DB8500
17 bool "DB8500" 18 bool "DB8500"
19 select MFD_DB8500_PRCMU
20 select REGULATOR_DB8500_PRCMU
18 21
19endmenu 22endmenu
20 23
diff --git a/arch/arm/mach-ux500/Makefile b/arch/arm/mach-ux500/Makefile
index b549a8fb4231..1694916e6822 100644
--- a/arch/arm/mach-ux500/Makefile
+++ b/arch/arm/mach-ux500/Makefile
@@ -5,7 +5,7 @@
5obj-y := clock.o cpu.o devices.o devices-common.o \ 5obj-y := clock.o cpu.o devices.o devices-common.o \
6 id.o usb.o 6 id.o usb.o
7obj-$(CONFIG_UX500_SOC_DB5500) += cpu-db5500.o dma-db5500.o 7obj-$(CONFIG_UX500_SOC_DB5500) += cpu-db5500.o dma-db5500.o
8obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o prcmu.o 8obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o
9obj-$(CONFIG_MACH_U8500) += board-mop500.o board-mop500-sdi.o \ 9obj-$(CONFIG_MACH_U8500) += board-mop500.o board-mop500-sdi.o \
10 board-mop500-regulators.o \ 10 board-mop500-regulators.o \
11 board-mop500-uib.o board-mop500-stuib.o \ 11 board-mop500-uib.o board-mop500-stuib.o \
@@ -17,4 +17,4 @@ obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
17obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o 17obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
18obj-$(CONFIG_U5500_MODEM_IRQ) += modem-irq-db5500.o 18obj-$(CONFIG_U5500_MODEM_IRQ) += modem-irq-db5500.o
19obj-$(CONFIG_U5500_MBOX) += mbox-db5500.o 19obj-$(CONFIG_U5500_MBOX) += mbox-db5500.o
20obj-$(CONFIG_CPU_FREQ) += cpufreq.o 20
diff --git a/arch/arm/mach-ux500/cpu-db5500.c b/arch/arm/mach-ux500/cpu-db5500.c
index c9dc2eff3cb2..c01bc19e3c5e 100644
--- a/arch/arm/mach-ux500/cpu-db5500.c
+++ b/arch/arm/mach-ux500/cpu-db5500.c
@@ -188,6 +188,8 @@ void __init u5500_map_io(void)
188 ux500_map_io(); 188 ux500_map_io();
189 189
190 iotable_init(u5500_io_desc, ARRAY_SIZE(u5500_io_desc)); 190 iotable_init(u5500_io_desc, ARRAY_SIZE(u5500_io_desc));
191
192 _PRCMU_BASE = __io_address(U5500_PRCMU_BASE);
191} 193}
192 194
193static int usb_db5500_rx_dma_cfg[] = { 195static int usb_db5500_rx_dma_cfg[] = {
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 516126cb357d..c3c417656bd9 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -87,6 +87,8 @@ void __init u8500_map_io(void)
87 iotable_init(u8500_v1_io_desc, ARRAY_SIZE(u8500_v1_io_desc)); 87 iotable_init(u8500_v1_io_desc, ARRAY_SIZE(u8500_v1_io_desc));
88 else if (cpu_is_u8500v2()) 88 else if (cpu_is_u8500v2())
89 iotable_init(u8500_v2_io_desc, ARRAY_SIZE(u8500_v2_io_desc)); 89 iotable_init(u8500_v2_io_desc, ARRAY_SIZE(u8500_v2_io_desc));
90
91 _PRCMU_BASE = __io_address(U8500_PRCMU_BASE);
90} 92}
91 93
92static struct resource db8500_pmu_resources[] = { 94static struct resource db8500_pmu_resources[] = {
@@ -129,9 +131,14 @@ static struct platform_device db8500_pmu_device = {
129 .dev.platform_data = &db8500_pmu_platdata, 131 .dev.platform_data = &db8500_pmu_platdata,
130}; 132};
131 133
134static struct platform_device db8500_prcmu_device = {
135 .name = "db8500-prcmu",
136};
137
132static struct platform_device *platform_devs[] __initdata = { 138static struct platform_device *platform_devs[] __initdata = {
133 &u8500_dma40_device, 139 &u8500_dma40_device,
134 &db8500_pmu_device, 140 &db8500_pmu_device,
141 &db8500_prcmu_device,
135}; 142};
136 143
137static resource_size_t __initdata db8500_gpio_base[] = { 144static resource_size_t __initdata db8500_gpio_base[] = {
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
index 5a43107c6232..1da23bb87c16 100644
--- a/arch/arm/mach-ux500/cpu.c
+++ b/arch/arm/mach-ux500/cpu.c
@@ -8,6 +8,8 @@
8#include <linux/platform_device.h> 8#include <linux/platform_device.h>
9#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <linux/mfd/db8500-prcmu.h>
12#include <linux/mfd/db5500-prcmu.h>
11 13
12#include <asm/cacheflush.h> 14#include <asm/cacheflush.h>
13#include <asm/hardware/cache-l2x0.h> 15#include <asm/hardware/cache-l2x0.h>
@@ -19,10 +21,11 @@
19#include <mach/hardware.h> 21#include <mach/hardware.h>
20#include <mach/setup.h> 22#include <mach/setup.h>
21#include <mach/devices.h> 23#include <mach/devices.h>
22#include <mach/prcmu.h>
23 24
24#include "clock.h" 25#include "clock.h"
25 26
27void __iomem *_PRCMU_BASE;
28
26#ifdef CONFIG_CACHE_L2X0 29#ifdef CONFIG_CACHE_L2X0
27static void __iomem *l2x0_base; 30static void __iomem *l2x0_base;
28#endif 31#endif
@@ -47,6 +50,8 @@ void __init ux500_init_irq(void)
47 * Init clocks here so that they are available for system timer 50 * Init clocks here so that they are available for system timer
48 * initialization. 51 * initialization.
49 */ 52 */
53 if (cpu_is_u5500())
54 db5500_prcmu_early_init();
50 if (cpu_is_u8500()) 55 if (cpu_is_u8500())
51 prcmu_early_init(); 56 prcmu_early_init();
52 clk_init(); 57 clk_init();
diff --git a/arch/arm/mach-ux500/cpufreq.c b/arch/arm/mach-ux500/cpufreq.c
deleted file mode 100644
index 5c5b747f134d..000000000000
--- a/arch/arm/mach-ux500/cpufreq.c
+++ /dev/null
@@ -1,211 +0,0 @@
1/*
2 * CPU frequency scaling for u8500
3 * Inspired by linux/arch/arm/mach-davinci/cpufreq.c
4 *
5 * Copyright (C) STMicroelectronics 2009
6 * Copyright (C) ST-Ericsson SA 2010
7 *
8 * License Terms: GNU General Public License v2
9 *
10 * Author: Sundar Iyer <sundar.iyer@stericsson.com>
11 * Author: Martin Persson <martin.persson@stericsson.com>
12 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
13 *
14 */
15
16#include <linux/platform_device.h>
17#include <linux/kernel.h>
18#include <linux/cpufreq.h>
19#include <linux/delay.h>
20
21#include <mach/hardware.h>
22#include <mach/prcmu.h>
23#include <mach/prcmu-defs.h>
24
25#define DRIVER_NAME "cpufreq-u8500"
26#define CPUFREQ_NAME "u8500"
27
28static struct device *dev;
29
30static struct cpufreq_frequency_table freq_table[] = {
31 [0] = {
32 .index = 0,
33 .frequency = 200000,
34 },
35 [1] = {
36 .index = 1,
37 .frequency = 300000,
38 },
39 [2] = {
40 .index = 2,
41 .frequency = 600000,
42 },
43 [3] = {
44 /* Used for CPU_OPP_MAX, if available */
45 .index = 3,
46 .frequency = CPUFREQ_TABLE_END,
47 },
48 [4] = {
49 .index = 4,
50 .frequency = CPUFREQ_TABLE_END,
51 },
52};
53
54static enum prcmu_cpu_opp index2opp[] = {
55 CPU_OPP_EXT_CLK,
56 CPU_OPP_50,
57 CPU_OPP_100,
58 CPU_OPP_MAX
59};
60
61static int u8500_cpufreq_verify_speed(struct cpufreq_policy *policy)
62{
63 return cpufreq_frequency_table_verify(policy, freq_table);
64}
65
66static int u8500_cpufreq_target(struct cpufreq_policy *policy,
67 unsigned int target_freq,
68 unsigned int relation)
69{
70 struct cpufreq_freqs freqs;
71 unsigned int index;
72 int ret = 0;
73
74 /*
75 * Ensure desired rate is within allowed range. Some govenors
76 * (ondemand) will just pass target_freq=0 to get the minimum.
77 */
78 if (target_freq < policy->cpuinfo.min_freq)
79 target_freq = policy->cpuinfo.min_freq;
80 if (target_freq > policy->cpuinfo.max_freq)
81 target_freq = policy->cpuinfo.max_freq;
82
83 ret = cpufreq_frequency_table_target(policy, freq_table,
84 target_freq, relation, &index);
85 if (ret < 0) {
86 dev_err(dev, "Could not look up next frequency\n");
87 return ret;
88 }
89
90 freqs.old = policy->cur;
91 freqs.new = freq_table[index].frequency;
92 freqs.cpu = policy->cpu;
93
94 if (freqs.old == freqs.new) {
95 dev_dbg(dev, "Current and target frequencies are equal\n");
96 return 0;
97 }
98
99 dev_dbg(dev, "transition: %u --> %u\n", freqs.old, freqs.new);
100 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
101
102 ret = prcmu_set_cpu_opp(index2opp[index]);
103 if (ret < 0) {
104 dev_err(dev, "Failed to set OPP level\n");
105 return ret;
106 }
107
108 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
109
110 return ret;
111}
112
113static unsigned int u8500_cpufreq_getspeed(unsigned int cpu)
114{
115 int i;
116
117 for (i = 0; prcmu_get_cpu_opp() != index2opp[i]; i++)
118 ;
119 return freq_table[i].frequency;
120}
121
122static int __cpuinit u8500_cpu_init(struct cpufreq_policy *policy)
123{
124 int res;
125
126 BUILD_BUG_ON(ARRAY_SIZE(index2opp) + 1 != ARRAY_SIZE(freq_table));
127
128 if (cpu_is_u8500v2()) {
129 freq_table[1].frequency = 400000;
130 freq_table[2].frequency = 800000;
131 if (prcmu_has_arm_maxopp())
132 freq_table[3].frequency = 1000000;
133 }
134
135 /* get policy fields based on the table */
136 res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
137 if (!res)
138 cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
139 else {
140 dev_err(dev, "u8500-cpufreq : Failed to read policy table\n");
141 return res;
142 }
143
144 policy->min = policy->cpuinfo.min_freq;
145 policy->max = policy->cpuinfo.max_freq;
146 policy->cur = u8500_cpufreq_getspeed(policy->cpu);
147 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
148
149 /*
150 * FIXME : Need to take time measurement across the target()
151 * function with no/some/all drivers in the notification
152 * list.
153 */
154 policy->cpuinfo.transition_latency = 200 * 1000; /* in ns */
155
156 /* policy sharing between dual CPUs */
157 cpumask_copy(policy->cpus, &cpu_present_map);
158
159 policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
160
161 return res;
162}
163
164static struct freq_attr *u8500_cpufreq_attr[] = {
165 &cpufreq_freq_attr_scaling_available_freqs,
166 NULL,
167};
168static int u8500_cpu_exit(struct cpufreq_policy *policy)
169{
170 cpufreq_frequency_table_put_attr(policy->cpu);
171 return 0;
172}
173
174static struct cpufreq_driver u8500_driver = {
175 .owner = THIS_MODULE,
176 .flags = CPUFREQ_STICKY,
177 .verify = u8500_cpufreq_verify_speed,
178 .target = u8500_cpufreq_target,
179 .get = u8500_cpufreq_getspeed,
180 .init = u8500_cpu_init,
181 .exit = u8500_cpu_exit,
182 .name = CPUFREQ_NAME,
183 .attr = u8500_cpufreq_attr,
184};
185
186static int __init u8500_cpufreq_probe(struct platform_device *pdev)
187{
188 dev = &pdev->dev;
189 return cpufreq_register_driver(&u8500_driver);
190}
191
192static int __exit u8500_cpufreq_remove(struct platform_device *pdev)
193{
194 return cpufreq_unregister_driver(&u8500_driver);
195}
196
197static struct platform_driver u8500_cpufreq_driver = {
198 .driver = {
199 .name = DRIVER_NAME,
200 .owner = THIS_MODULE,
201 },
202 .remove = __exit_p(u8500_cpufreq_remove),
203};
204
205static int __init u8500_cpufreq_init(void)
206{
207 return platform_driver_probe(&u8500_cpufreq_driver,
208 &u8500_cpufreq_probe);
209}
210
211device_initcall(u8500_cpufreq_init);
diff --git a/arch/arm/mach-ux500/include/mach/db5500-regs.h b/arch/arm/mach-ux500/include/mach/db5500-regs.h
index bd88c1e74060..6ad983294103 100644
--- a/arch/arm/mach-ux500/include/mach/db5500-regs.h
+++ b/arch/arm/mach-ux500/include/mach/db5500-regs.h
@@ -17,6 +17,8 @@
17#define U5500_GIC_DIST_BASE 0xA0411000 17#define U5500_GIC_DIST_BASE 0xA0411000
18#define U5500_GIC_CPU_BASE 0xA0410100 18#define U5500_GIC_CPU_BASE 0xA0410100
19#define U5500_DMA_BASE 0x90030000 19#define U5500_DMA_BASE 0x90030000
20#define U5500_STM_BASE 0x90020000
21#define U5500_STM_REG_BASE (U5500_STM_BASE + 0xF000)
20#define U5500_MCDE_BASE 0xA0400000 22#define U5500_MCDE_BASE 0xA0400000
21#define U5500_MODEM_BASE 0xB0000000 23#define U5500_MODEM_BASE 0xB0000000
22#define U5500_L2CC_BASE 0xA0412000 24#define U5500_L2CC_BASE 0xA0412000
@@ -29,7 +31,9 @@
29#define U5500_NAND0_BASE 0x60000000 31#define U5500_NAND0_BASE 0x60000000
30#define U5500_NAND1_BASE 0x70000000 32#define U5500_NAND1_BASE 0x70000000
31#define U5500_TWD_BASE 0xa0410600 33#define U5500_TWD_BASE 0xa0410600
34#define U5500_ICN_BASE 0xA0040000
32#define U5500_B2R2_BASE 0xa0200000 35#define U5500_B2R2_BASE 0xa0200000
36#define U5500_BOOT_ROM_BASE 0x90000000
33 37
34#define U5500_FSMC_BASE (U5500_PER1_BASE + 0x0000) 38#define U5500_FSMC_BASE (U5500_PER1_BASE + 0x0000)
35#define U5500_SDI0_BASE (U5500_PER1_BASE + 0x1000) 39#define U5500_SDI0_BASE (U5500_PER1_BASE + 0x1000)
@@ -60,6 +64,7 @@
60#define U5500_MSP1_BASE (U5500_PER4_BASE + 0x9000) 64#define U5500_MSP1_BASE (U5500_PER4_BASE + 0x9000)
61#define U5500_GPIO2_BASE (U5500_PER4_BASE + 0xA000) 65#define U5500_GPIO2_BASE (U5500_PER4_BASE + 0xA000)
62#define U5500_CDETECT_BASE (U5500_PER4_BASE + 0xF000) 66#define U5500_CDETECT_BASE (U5500_PER4_BASE + 0xF000)
67#define U5500_PRCMU_TCDM_BASE (U5500_PER4_BASE + 0x18000)
63 68
64#define U5500_SPI0_BASE (U5500_PER5_BASE + 0x0000) 69#define U5500_SPI0_BASE (U5500_PER5_BASE + 0x0000)
65#define U5500_SPI1_BASE (U5500_PER5_BASE + 0x1000) 70#define U5500_SPI1_BASE (U5500_PER5_BASE + 0x1000)
@@ -83,7 +88,7 @@
83#define U5500_HASH0_BASE (U5500_PER6_BASE + 0x1000) 88#define U5500_HASH0_BASE (U5500_PER6_BASE + 0x1000)
84#define U5500_HASH1_BASE (U5500_PER6_BASE + 0x2000) 89#define U5500_HASH1_BASE (U5500_PER6_BASE + 0x2000)
85#define U5500_PKA_BASE (U5500_PER6_BASE + 0x4000) 90#define U5500_PKA_BASE (U5500_PER6_BASE + 0x4000)
86#define U5500_PKAM_BASE (U5500_PER6_BASE + 0x5000) 91#define U5500_PKAM_BASE (U5500_PER6_BASE + 0x5100)
87#define U5500_MTU0_BASE (U5500_PER6_BASE + 0x6000) 92#define U5500_MTU0_BASE (U5500_PER6_BASE + 0x6000)
88#define U5500_MTU1_BASE (U5500_PER6_BASE + 0x7000) 93#define U5500_MTU1_BASE (U5500_PER6_BASE + 0x7000)
89#define U5500_CR_BASE (U5500_PER6_BASE + 0x8000) 94#define U5500_CR_BASE (U5500_PER6_BASE + 0x8000)
@@ -114,8 +119,19 @@
114#define U5500_MBOX2_LOCAL_START (U5500_MBOX_BASE + 0x20) 119#define U5500_MBOX2_LOCAL_START (U5500_MBOX_BASE + 0x20)
115#define U5500_MBOX2_LOCAL_END (U5500_MBOX_BASE + 0x3F) 120#define U5500_MBOX2_LOCAL_END (U5500_MBOX_BASE + 0x3F)
116 121
117#define U5500_ESRAM_BASE 0x40000000 122#define U5500_ACCCON_BASE_SEC (0xBFFF0000)
123#define U5500_ACCCON_BASE (0xBFFF1000)
124#define U5500_ACCCON_CPUVEC_RESET_ADDR_OFFSET (0x00000020)
125#define U5500_ACCCON_ACC_CPU_CTRL_OFFSET (0x000000BC)
126
127#define U5500_ESRAM_BASE 0x40000000
118#define U5500_ESRAM_DMA_LCPA_OFFSET 0x10000 128#define U5500_ESRAM_DMA_LCPA_OFFSET 0x10000
119#define U5500_DMA_LCPA_BASE (U5500_ESRAM_BASE + U5500_ESRAM_DMA_LCPA_OFFSET) 129#define U5500_DMA_LCPA_BASE (U5500_ESRAM_BASE + U5500_ESRAM_DMA_LCPA_OFFSET)
120 130
131#define U5500_MCDE_SIZE 0x1000
132#define U5500_DSI_LINK_SIZE 0x1000
133#define U5500_DSI_LINK_COUNT 0x2
134#define U5500_DSI_LINK1_BASE (U5500_MCDE_BASE + U5500_MCDE_SIZE)
135#define U5500_DSI_LINK2_BASE (U5500_DSI_LINK1_BASE + U5500_DSI_LINK_SIZE)
136
121#endif 137#endif
diff --git a/arch/arm/mach-ux500/include/mach/db8500-regs.h b/arch/arm/mach-ux500/include/mach/db8500-regs.h
index 16647b255378..049997109cf9 100644
--- a/arch/arm/mach-ux500/include/mach/db8500-regs.h
+++ b/arch/arm/mach-ux500/include/mach/db8500-regs.h
@@ -15,8 +15,13 @@
15#define U8500_ESRAM_BANK2 (U8500_ESRAM_BANK1 + U8500_ESRAM_BANK_SIZE) 15#define U8500_ESRAM_BANK2 (U8500_ESRAM_BANK1 + U8500_ESRAM_BANK_SIZE)
16#define U8500_ESRAM_BANK3 (U8500_ESRAM_BANK2 + U8500_ESRAM_BANK_SIZE) 16#define U8500_ESRAM_BANK3 (U8500_ESRAM_BANK2 + U8500_ESRAM_BANK_SIZE)
17#define U8500_ESRAM_BANK4 (U8500_ESRAM_BANK3 + U8500_ESRAM_BANK_SIZE) 17#define U8500_ESRAM_BANK4 (U8500_ESRAM_BANK3 + U8500_ESRAM_BANK_SIZE)
18/* Use bank 4 for DMA LCPA */ 18/*
19#define U8500_DMA_LCPA_BASE U8500_ESRAM_BANK4 19 * on V1 DMA uses 4KB for logical parameters position is right after the 64KB
20 * reserved for security
21 */
22#define U8500_ESRAM_DMA_LCPA_OFFSET 0x10000
23
24#define U8500_DMA_LCPA_BASE (U8500_ESRAM_BANK0 + U8500_ESRAM_DMA_LCPA_OFFSET)
20#define U8500_DMA_LCPA_BASE_ED (U8500_ESRAM_BANK4 + 0x4000) 25#define U8500_DMA_LCPA_BASE_ED (U8500_ESRAM_BANK4 + 0x4000)
21 26
22#define U8500_PER3_BASE 0x80000000 27#define U8500_PER3_BASE 0x80000000
@@ -27,9 +32,12 @@
27#define U8500_B2R2_BASE 0x80130000 32#define U8500_B2R2_BASE 0x80130000
28#define U8500_HSEM_BASE 0x80140000 33#define U8500_HSEM_BASE 0x80140000
29#define U8500_PER4_BASE 0x80150000 34#define U8500_PER4_BASE 0x80150000
35#define U8500_TPIU_BASE 0x80190000
30#define U8500_ICN_BASE 0x81000000 36#define U8500_ICN_BASE 0x81000000
31 37
32#define U8500_BOOT_ROM_BASE 0x90000000 38#define U8500_BOOT_ROM_BASE 0x90000000
39/* ASIC ID is at 0xbf4 offset within this region */
40#define U8500_ASIC_ID_BASE 0x9001D000
33 41
34#define U8500_PER6_BASE 0xa03c0000 42#define U8500_PER6_BASE 0xa03c0000
35#define U8500_PER5_BASE 0xa03e0000 43#define U8500_PER5_BASE 0xa03e0000
@@ -70,13 +78,15 @@
70 78
71/* per6 base addresses */ 79/* per6 base addresses */
72#define U8500_RNG_BASE (U8500_PER6_BASE + 0x0000) 80#define U8500_RNG_BASE (U8500_PER6_BASE + 0x0000)
73#define U8500_PKA_BASE (U8500_PER6_BASE + 0x1000) 81#define U8500_HASH0_BASE (U8500_PER6_BASE + 0x1000)
74#define U8500_PKAM_BASE (U8500_PER6_BASE + 0x2000) 82#define U8500_HASH1_BASE (U8500_PER6_BASE + 0x2000)
83#define U8500_PKA_BASE (U8500_PER6_BASE + 0x4000)
84#define U8500_PKAM_BASE (U8500_PER6_BASE + 0x5100)
75#define U8500_MTU0_BASE (U8500_PER6_BASE + 0x6000) /* v1 */ 85#define U8500_MTU0_BASE (U8500_PER6_BASE + 0x6000) /* v1 */
76#define U8500_MTU1_BASE (U8500_PER6_BASE + 0x7000) /* v1 */ 86#define U8500_MTU1_BASE (U8500_PER6_BASE + 0x7000) /* v1 */
77#define U8500_CR_BASE (U8500_PER6_BASE + 0x8000) /* v1 */ 87#define U8500_CR_BASE (U8500_PER6_BASE + 0x8000) /* v1 */
78#define U8500_CRYPTO0_BASE (U8500_PER6_BASE + 0xa000) 88#define U8500_CRYP0_BASE (U8500_PER6_BASE + 0xa000)
79#define U8500_CRYPTO1_BASE (U8500_PER6_BASE + 0xb000) 89#define U8500_CRYP1_BASE (U8500_PER6_BASE + 0xb000)
80#define U8500_CLKRST6_BASE (U8500_PER6_BASE + 0xf000) 90#define U8500_CLKRST6_BASE (U8500_PER6_BASE + 0xf000)
81 91
82/* per5 base addresses */ 92/* per5 base addresses */
@@ -93,7 +103,8 @@
93#define U8500_DMC_BASE (U8500_PER4_BASE + 0x06000) 103#define U8500_DMC_BASE (U8500_PER4_BASE + 0x06000)
94#define U8500_PRCMU_BASE (U8500_PER4_BASE + 0x07000) 104#define U8500_PRCMU_BASE (U8500_PER4_BASE + 0x07000)
95#define U8500_PRCMU_TCDM_BASE_V1 (U8500_PER4_BASE + 0x0f000) 105#define U8500_PRCMU_TCDM_BASE_V1 (U8500_PER4_BASE + 0x0f000)
96#define U8500_PRCMU_TCDM_BASE (U8500_PER4_BASE + 0x68000) 106#define U8500_PRCMU_TCDM_BASE (U8500_PER4_BASE + 0x68000)
107#define U8500_PRCMU_TCPM_BASE (U8500_PER4_BASE + 0x60000)
97 108
98/* per3 base addresses */ 109/* per3 base addresses */
99#define U8500_FSMC_BASE (U8500_PER3_BASE + 0x0000) 110#define U8500_FSMC_BASE (U8500_PER3_BASE + 0x0000)
@@ -124,6 +135,7 @@
124#define U8500_I2C1_BASE (U8500_PER1_BASE + 0x2000) 135#define U8500_I2C1_BASE (U8500_PER1_BASE + 0x2000)
125#define U8500_MSP0_BASE (U8500_PER1_BASE + 0x3000) 136#define U8500_MSP0_BASE (U8500_PER1_BASE + 0x3000)
126#define U8500_MSP1_BASE (U8500_PER1_BASE + 0x4000) 137#define U8500_MSP1_BASE (U8500_PER1_BASE + 0x4000)
138#define U8500_MSP3_BASE (U8500_PER1_BASE + 0x5000)
127#define U8500_SDI0_BASE (U8500_PER1_BASE + 0x6000) 139#define U8500_SDI0_BASE (U8500_PER1_BASE + 0x6000)
128#define U8500_I2C2_BASE (U8500_PER1_BASE + 0x8000) 140#define U8500_I2C2_BASE (U8500_PER1_BASE + 0x8000)
129#define U8500_SPI3_BASE (U8500_PER1_BASE + 0x9000) 141#define U8500_SPI3_BASE (U8500_PER1_BASE + 0x9000)
@@ -143,4 +155,15 @@
143#define U8500_GPIOBANK7_BASE (U8500_GPIO2_BASE + 0x80) 155#define U8500_GPIOBANK7_BASE (U8500_GPIO2_BASE + 0x80)
144#define U8500_GPIOBANK8_BASE U8500_GPIO3_BASE 156#define U8500_GPIOBANK8_BASE U8500_GPIO3_BASE
145 157
158#define U8500_MCDE_SIZE 0x1000
159#define U8500_DSI_LINK_SIZE 0x1000
160#define U8500_DSI_LINK1_BASE (U8500_MCDE_BASE + U8500_MCDE_SIZE)
161#define U8500_DSI_LINK2_BASE (U8500_DSI_LINK1_BASE + U8500_DSI_LINK_SIZE)
162#define U8500_DSI_LINK3_BASE (U8500_DSI_LINK2_BASE + U8500_DSI_LINK_SIZE)
163#define U8500_DSI_LINK_COUNT 0x3
164
165/* Modem and APE physical addresses */
166#define U8500_MODEM_BASE 0xe000000
167#define U8500_APE_BASE 0x6000000
168
146#endif 169#endif
diff --git a/arch/arm/mach-ux500/include/mach/hardware.h b/arch/arm/mach-ux500/include/mach/hardware.h
index bf63f2631ba0..2c6f71049f2e 100644
--- a/arch/arm/mach-ux500/include/mach/hardware.h
+++ b/arch/arm/mach-ux500/include/mach/hardware.h
@@ -35,6 +35,7 @@
35#ifndef __ASSEMBLY__ 35#ifndef __ASSEMBLY__
36 36
37#include <mach/id.h> 37#include <mach/id.h>
38extern void __iomem *_PRCMU_BASE;
38 39
39#define ARRAY_AND_SIZE(x) (x), ARRAY_SIZE(x) 40#define ARRAY_AND_SIZE(x) (x), ARRAY_SIZE(x)
40 41
diff --git a/arch/arm/mach-ux500/include/mach/id.h b/arch/arm/mach-ux500/include/mach/id.h
index f1288d10b6ab..02b541a37ee5 100644
--- a/arch/arm/mach-ux500/include/mach/id.h
+++ b/arch/arm/mach-ux500/include/mach/id.h
@@ -75,6 +75,26 @@ static inline bool __attribute_const__ cpu_is_u8500v2(void)
75 return cpu_is_u8500() && ((dbx500_revision() & 0xf0) == 0xB0); 75 return cpu_is_u8500() && ((dbx500_revision() & 0xf0) == 0xB0);
76} 76}
77 77
78static inline bool cpu_is_u8500v20(void)
79{
80 return cpu_is_u8500() && (dbx500_revision() == 0xB0);
81}
82
83static inline bool cpu_is_u8500v21(void)
84{
85 return cpu_is_u8500() && (dbx500_revision() == 0xB1);
86}
87
88static inline bool cpu_is_u8500v20_or_later(void)
89{
90 return cpu_is_u8500() && !cpu_is_u8500v10() && !cpu_is_u8500v11();
91}
92
93static inline bool ux500_is_svp(void)
94{
95 return false;
96}
97
78#define ux500_unknown_soc() BUG() 98#define ux500_unknown_soc() BUG()
79 99
80#endif 100#endif
diff --git a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
index 97ef55f84934..47969909836c 100644
--- a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
+++ b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
@@ -50,6 +50,11 @@
50 50
51#define MOP500_IRQ_END MOP500_NR_IRQS 51#define MOP500_IRQ_END MOP500_NR_IRQS
52 52
53/*
54 * We may have several boards, but only one will run at a
55 * time, so the one with most IRQs will bump this ahead,
56 * but the IRQ_BOARD_START remains the same for either board.
57 */
53#if MOP500_IRQ_END > IRQ_BOARD_END 58#if MOP500_IRQ_END > IRQ_BOARD_END
54#undef IRQ_BOARD_END 59#undef IRQ_BOARD_END
55#define IRQ_BOARD_END MOP500_IRQ_END 60#define IRQ_BOARD_END MOP500_IRQ_END
diff --git a/arch/arm/mach-ux500/include/mach/irqs-board-u5500.h b/arch/arm/mach-ux500/include/mach/irqs-board-u5500.h
new file mode 100644
index 000000000000..29d972c7717b
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/irqs-board-u5500.h
@@ -0,0 +1,21 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#ifndef __MACH_IRQS_BOARD_U5500_H
8#define __MACH_IRQS_BOARD_U5500_H
9
10#define AB5500_NR_IRQS 5
11#define IRQ_AB5500_BASE IRQ_BOARD_START
12#define IRQ_AB5500_END (IRQ_AB5500_BASE + AB5500_NR_IRQS)
13
14#define U5500_IRQ_END IRQ_AB5500_END
15
16#if IRQ_BOARD_END < U5500_IRQ_END
17#undef IRQ_BOARD_END
18#define IRQ_BOARD_END U5500_IRQ_END
19#endif
20
21#endif
diff --git a/arch/arm/mach-ux500/include/mach/irqs-db5500.h b/arch/arm/mach-ux500/include/mach/irqs-db5500.h
index bfa123dbec3b..77239776a6f2 100644
--- a/arch/arm/mach-ux500/include/mach/irqs-db5500.h
+++ b/arch/arm/mach-ux500/include/mach/irqs-db5500.h
@@ -83,4 +83,31 @@
83#define IRQ_DB5500_GPIO6 (IRQ_SHPI_START + 125) 83#define IRQ_DB5500_GPIO6 (IRQ_SHPI_START + 125)
84#define IRQ_DB5500_GPIO7 (IRQ_SHPI_START + 126) 84#define IRQ_DB5500_GPIO7 (IRQ_SHPI_START + 126)
85 85
86#ifdef CONFIG_UX500_SOC_DB5500
87
88/*
89 * After the GPIO ones we reserve a range of IRQ:s in which virtual
90 * IRQ:s representing modem IRQ:s can be allocated
91 */
92#define IRQ_MODEM_EVENTS_BASE IRQ_SOC_START
93#define IRQ_MODEM_EVENTS_NBR 72
94#define IRQ_MODEM_EVENTS_END (IRQ_MODEM_EVENTS_BASE + IRQ_MODEM_EVENTS_NBR)
95
96/* List of virtual IRQ:s that are allocated from the range above */
97#define MBOX_PAIR0_VIRT_IRQ (IRQ_MODEM_EVENTS_BASE + 43)
98#define MBOX_PAIR1_VIRT_IRQ (IRQ_MODEM_EVENTS_BASE + 45)
99#define MBOX_PAIR2_VIRT_IRQ (IRQ_MODEM_EVENTS_BASE + 41)
100
101/*
102 * We may have several SoCs, but only one will run at a
103 * time, so the one with most IRQs will bump this ahead,
104 * but the IRQ_SOC_START remains the same for either SoC.
105 */
106#if IRQ_SOC_END < IRQ_MODEM_EVENTS_END
107#undef IRQ_SOC_END
108#define IRQ_SOC_END IRQ_MODEM_EVENTS_END
109#endif
110
111#endif /* CONFIG_UX500_SOC_DB5500 */
112
86#endif 113#endif
diff --git a/arch/arm/mach-ux500/include/mach/irqs-db8500.h b/arch/arm/mach-ux500/include/mach/irqs-db8500.h
index 8b5d9f0a1633..68bc14974608 100644
--- a/arch/arm/mach-ux500/include/mach/irqs-db8500.h
+++ b/arch/arm/mach-ux500/include/mach/irqs-db8500.h
@@ -93,4 +93,58 @@
93#define IRQ_DB8500_GPIO7 (IRQ_SHPI_START + 126) 93#define IRQ_DB8500_GPIO7 (IRQ_SHPI_START + 126)
94#define IRQ_DB8500_GPIO8 (IRQ_SHPI_START + 127) 94#define IRQ_DB8500_GPIO8 (IRQ_SHPI_START + 127)
95 95
96#define IRQ_CA_WAKE_REQ_ED (IRQ_SHPI_START + 71)
97#define IRQ_AC_READ_NOTIFICATION_0_ED (IRQ_SHPI_START + 66)
98#define IRQ_AC_READ_NOTIFICATION_1_ED (IRQ_SHPI_START + 64)
99#define IRQ_CA_MSG_PEND_NOTIFICATION_0_ED (IRQ_SHPI_START + 67)
100#define IRQ_CA_MSG_PEND_NOTIFICATION_1_ED (IRQ_SHPI_START + 65)
101
102#define IRQ_CA_WAKE_REQ_V1 (IRQ_SHPI_START + 83)
103#define IRQ_AC_READ_NOTIFICATION_0_V1 (IRQ_SHPI_START + 78)
104#define IRQ_AC_READ_NOTIFICATION_1_V1 (IRQ_SHPI_START + 76)
105#define IRQ_CA_MSG_PEND_NOTIFICATION_0_V1 (IRQ_SHPI_START + 79)
106#define IRQ_CA_MSG_PEND_NOTIFICATION_1_V1 (IRQ_SHPI_START + 77)
107
108#ifdef CONFIG_UX500_SOC_DB8500
109
110/* Virtual interrupts corresponding to the PRCMU wakeups. */
111#define IRQ_PRCMU_BASE IRQ_SOC_START
112#define NUM_PRCMU_WAKEUPS (IRQ_PRCMU_END - IRQ_PRCMU_BASE)
113
114#define IRQ_PRCMU_RTC (IRQ_PRCMU_BASE)
115#define IRQ_PRCMU_RTT0 (IRQ_PRCMU_BASE + 1)
116#define IRQ_PRCMU_RTT1 (IRQ_PRCMU_BASE + 2)
117#define IRQ_PRCMU_HSI0 (IRQ_PRCMU_BASE + 3)
118#define IRQ_PRCMU_HSI1 (IRQ_PRCMU_BASE + 4)
119#define IRQ_PRCMU_CA_WAKE (IRQ_PRCMU_BASE + 5)
120#define IRQ_PRCMU_USB (IRQ_PRCMU_BASE + 6)
121#define IRQ_PRCMU_ABB (IRQ_PRCMU_BASE + 7)
122#define IRQ_PRCMU_ABB_FIFO (IRQ_PRCMU_BASE + 8)
123#define IRQ_PRCMU_ARM (IRQ_PRCMU_BASE + 9)
124#define IRQ_PRCMU_MODEM_SW_RESET_REQ (IRQ_PRCMU_BASE + 10)
125#define IRQ_PRCMU_GPIO0 (IRQ_PRCMU_BASE + 11)
126#define IRQ_PRCMU_GPIO1 (IRQ_PRCMU_BASE + 12)
127#define IRQ_PRCMU_GPIO2 (IRQ_PRCMU_BASE + 13)
128#define IRQ_PRCMU_GPIO3 (IRQ_PRCMU_BASE + 14)
129#define IRQ_PRCMU_GPIO4 (IRQ_PRCMU_BASE + 15)
130#define IRQ_PRCMU_GPIO5 (IRQ_PRCMU_BASE + 16)
131#define IRQ_PRCMU_GPIO6 (IRQ_PRCMU_BASE + 17)
132#define IRQ_PRCMU_GPIO7 (IRQ_PRCMU_BASE + 18)
133#define IRQ_PRCMU_GPIO8 (IRQ_PRCMU_BASE + 19)
134#define IRQ_PRCMU_CA_SLEEP (IRQ_PRCMU_BASE + 20)
135#define IRQ_PRCMU_HOTMON_LOW (IRQ_PRCMU_BASE + 21)
136#define IRQ_PRCMU_HOTMON_HIGH (IRQ_PRCMU_BASE + 22)
137#define IRQ_PRCMU_END (IRQ_PRCMU_BASE + 23)
138
139/*
140 * We may have several SoCs, but only one will run at a
141 * time, so the one with most IRQs will bump this ahead,
142 * but the IRQ_SOC_START remains the same for either SoC.
143 */
144#if IRQ_SOC_END < IRQ_PRCMU_END
145#undef IRQ_SOC_END
146#define IRQ_SOC_END IRQ_PRCMU_END
147#endif
148
149#endif /* CONFIG_UX500_SOC_DB8500 */
96#endif 150#endif
diff --git a/arch/arm/mach-ux500/include/mach/irqs.h b/arch/arm/mach-ux500/include/mach/irqs.h
index ba1294c13c4d..9db68d264c5f 100644
--- a/arch/arm/mach-ux500/include/mach/irqs.h
+++ b/arch/arm/mach-ux500/include/mach/irqs.h
@@ -10,49 +10,47 @@
10#ifndef ASM_ARCH_IRQS_H 10#ifndef ASM_ARCH_IRQS_H
11#define ASM_ARCH_IRQS_H 11#define ASM_ARCH_IRQS_H
12 12
13#include <mach/irqs-db5500.h> 13#include <mach/hardware.h>
14#include <mach/irqs-db8500.h>
15 14
16#define IRQ_LOCALTIMER 29 15#define IRQ_LOCALTIMER 29
17#define IRQ_LOCALWDOG 30 16#define IRQ_LOCALWDOG 30
18 17
19/* Shared Peripheral Interrupt (SHPI) */ 18/* Shared Peripheral Interrupt (SHPI) */
20#define IRQ_SHPI_START 32 19#define IRQ_SHPI_START 32
21 20
22/* Interrupt numbers generic for shared peripheral */ 21/*
22 * MTU0 preserved for now until plat-nomadik is taught not to use it. Don't
23 * add any other IRQs here, use the irqs-dbx500.h files.
24 */
23#define IRQ_MTU0 (IRQ_SHPI_START + 4) 25#define IRQ_MTU0 (IRQ_SHPI_START + 4)
24 26
25/* There are 128 shared peripheral interrupts assigned to 27#define DBX500_NR_INTERNAL_IRQS 160
26 * INTID[160:32]. The first 32 interrupts are reserved.
27 */
28#define DBX500_NR_INTERNAL_IRQS 161
29 28
30/* After chip-specific IRQ numbers we have the GPIO ones */ 29/* After chip-specific IRQ numbers we have the GPIO ones */
31#define NOMADIK_NR_GPIO 288 30#define NOMADIK_NR_GPIO 288
32#define NOMADIK_GPIO_TO_IRQ(gpio) ((gpio) + DBX500_NR_INTERNAL_IRQS) 31#define NOMADIK_GPIO_TO_IRQ(gpio) ((gpio) + DBX500_NR_INTERNAL_IRQS)
33#define NOMADIK_IRQ_TO_GPIO(irq) ((irq) - DBX500_NR_INTERNAL_IRQS) 32#define NOMADIK_IRQ_TO_GPIO(irq) ((irq) - DBX500_NR_INTERNAL_IRQS)
34#define IRQ_BOARD_START NOMADIK_GPIO_TO_IRQ(NOMADIK_NR_GPIO) 33#define IRQ_GPIO_END NOMADIK_GPIO_TO_IRQ(NOMADIK_NR_GPIO)
34
35#define IRQ_SOC_START IRQ_GPIO_END
36/* This will be overridden by SoC-specific irq headers */
37#define IRQ_SOC_END IRQ_SOC_START
35 38
39#include <mach/irqs-db5500.h>
40#include <mach/irqs-db8500.h>
41
42#define IRQ_BOARD_START IRQ_SOC_END
36/* This will be overridden by board-specific irq headers */ 43/* This will be overridden by board-specific irq headers */
37#define IRQ_BOARD_END IRQ_BOARD_START 44#define IRQ_BOARD_END IRQ_BOARD_START
38 45
39#ifdef CONFIG_MACH_U8500 46#ifdef CONFIG_MACH_U8500
40#include <mach/irqs-board-mop500.h> 47#include <mach/irqs-board-mop500.h>
41#endif 48#endif
42 49
43/* 50#ifdef CONFIG_MACH_U5500
44 * After the board specific IRQ:s we reserve a range of IRQ:s in which virtual 51#include <mach/irqs-board-u5500.h>
45 * IRQ:s representing modem IRQ:s can be allocated 52#endif
46 */
47#define IRQ_MODEM_EVENTS_BASE (IRQ_BOARD_END + 1)
48#define IRQ_MODEM_EVENTS_NBR 72
49#define IRQ_MODEM_EVENTS_END (IRQ_MODEM_EVENTS_BASE + IRQ_MODEM_EVENTS_NBR)
50
51/* List of virtual IRQ:s that are allocated from the range above */
52#define MBOX_PAIR0_VIRT_IRQ (IRQ_MODEM_EVENTS_BASE + 43)
53#define MBOX_PAIR1_VIRT_IRQ (IRQ_MODEM_EVENTS_BASE + 45)
54#define MBOX_PAIR2_VIRT_IRQ (IRQ_MODEM_EVENTS_BASE + 41)
55 53
56#define NR_IRQS IRQ_MODEM_EVENTS_END 54#define NR_IRQS IRQ_BOARD_END
57 55
58#endif /* ASM_ARCH_IRQS_H */ 56#endif /* ASM_ARCH_IRQS_H */
diff --git a/arch/arm/mach-ux500/include/mach/prcmu-defs.h b/arch/arm/mach-ux500/include/mach/prcmu-defs.h
deleted file mode 100644
index 848ba64b561f..000000000000
--- a/arch/arm/mach-ux500/include/mach/prcmu-defs.h
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * Copyright (C) STMicroelectronics 2009
3 * Copyright (C) ST-Ericsson SA 2010
4 *
5 * Author: Sundar Iyer <sundar.iyer@stericsson.com>
6 * Author: Martin Persson <martin.persson@stericsson.com>
7 *
8 * License Terms: GNU General Public License v2
9 *
10 * PRCM Unit definitions
11 */
12
13#ifndef __MACH_PRCMU_DEFS_H
14#define __MACH_PRCMU_DEFS_H
15
16enum prcmu_cpu_opp {
17 CPU_OPP_INIT = 0x00,
18 CPU_OPP_NO_CHANGE = 0x01,
19 CPU_OPP_100 = 0x02,
20 CPU_OPP_50 = 0x03,
21 CPU_OPP_MAX = 0x04,
22 CPU_OPP_EXT_CLK = 0x07
23};
24enum prcmu_ape_opp {
25 APE_OPP_NO_CHANGE = 0x00,
26 APE_OPP_100 = 0x02,
27 APE_OPP_50 = 0x03,
28};
29
30#endif /* __MACH_PRCMU_DEFS_H */
diff --git a/arch/arm/mach-ux500/include/mach/prcmu.h b/arch/arm/mach-ux500/include/mach/prcmu.h
deleted file mode 100644
index c49e456162ef..000000000000
--- a/arch/arm/mach-ux500/include/mach/prcmu.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * Copyright (C) STMicroelectronics 2009
3 * Copyright (C) ST-Ericsson SA 2010
4 *
5 * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
6 * Author: Sundar Iyer <sundar.iyer@stericsson.com>
7 * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
8 *
9 * License Terms: GNU General Public License v2
10 *
11 * PRCM Unit f/w API
12 */
13#ifndef __MACH_PRCMU_H
14#define __MACH_PRCMU_H
15#include <mach/prcmu-defs.h>
16
17void __init prcmu_early_init(void);
18int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
19int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size);
20int prcmu_set_ape_opp(enum prcmu_ape_opp opp);
21int prcmu_set_cpu_opp(enum prcmu_cpu_opp opp);
22int prcmu_set_ape_cpu_opps(enum prcmu_ape_opp ape_opp,
23 enum prcmu_cpu_opp cpu_opp);
24int prcmu_get_ape_opp(void);
25int prcmu_get_cpu_opp(void);
26bool prcmu_has_arm_maxopp(void);
27
28#endif /* __MACH_PRCMU_H */
diff --git a/arch/arm/mach-ux500/prcmu.c b/arch/arm/mach-ux500/prcmu.c
deleted file mode 100644
index c522d26ef348..000000000000
--- a/arch/arm/mach-ux500/prcmu.c
+++ /dev/null
@@ -1,394 +0,0 @@
1/*
2 * Copyright (C) STMicroelectronics 2009
3 * Copyright (C) ST-Ericsson SA 2010
4 *
5 * License Terms: GNU General Public License v2
6 * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
7 * Author: Sundar Iyer <sundar.iyer@stericsson.com>
8 * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
9 *
10 * U8500 PRCM Unit interface driver
11 *
12 */
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/errno.h>
16#include <linux/err.h>
17#include <linux/io.h>
18#include <linux/mutex.h>
19#include <linux/completion.h>
20#include <linux/jiffies.h>
21#include <linux/bitops.h>
22#include <linux/interrupt.h>
23
24#include <mach/hardware.h>
25#include <mach/prcmu-regs.h>
26#include <mach/prcmu-defs.h>
27
28/* Global var to runtime determine TCDM base for v2 or v1 */
29static __iomem void *tcdm_base;
30
31#define _MBOX_HEADER (tcdm_base + 0xFE8)
32#define MBOX_HEADER_REQ_MB0 (_MBOX_HEADER + 0x0)
33
34#define REQ_MB1 (tcdm_base + 0xFD0)
35#define REQ_MB5 (tcdm_base + 0xE44)
36
37#define REQ_MB1_ARMOPP (REQ_MB1 + 0x0)
38#define REQ_MB1_APEOPP (REQ_MB1 + 0x1)
39#define REQ_MB1_BOOSTOPP (REQ_MB1 + 0x2)
40
41#define ACK_MB1 (tcdm_base + 0xE04)
42#define ACK_MB5 (tcdm_base + 0xDF4)
43
44#define ACK_MB1_CURR_ARMOPP (ACK_MB1 + 0x0)
45#define ACK_MB1_CURR_APEOPP (ACK_MB1 + 0x1)
46
47#define REQ_MB5_I2C_SLAVE_OP (REQ_MB5)
48#define REQ_MB5_I2C_HW_BITS (REQ_MB5 + 1)
49#define REQ_MB5_I2C_REG (REQ_MB5 + 2)
50#define REQ_MB5_I2C_VAL (REQ_MB5 + 3)
51
52#define ACK_MB5_I2C_STATUS (ACK_MB5 + 1)
53#define ACK_MB5_I2C_VAL (ACK_MB5 + 3)
54
55#define PRCM_AVS_VARM_MAX_OPP (tcdm_base + 0x2E4)
56#define PRCM_AVS_ISMODEENABLE 7
57#define PRCM_AVS_ISMODEENABLE_MASK (1 << PRCM_AVS_ISMODEENABLE)
58
59#define I2C_WRITE(slave) \
60 (((slave) << 1) | (cpu_is_u8500v2() ? BIT(6) : 0))
61#define I2C_READ(slave) \
62 (((slave) << 1) | (cpu_is_u8500v2() ? BIT(6) : 0) | BIT(0))
63#define I2C_STOP_EN BIT(3)
64
65enum mb1_h {
66 MB1H_ARM_OPP = 1,
67 MB1H_APE_OPP,
68 MB1H_ARM_APE_OPP,
69};
70
71static struct {
72 struct mutex lock;
73 struct completion work;
74 struct {
75 u8 arm_opp;
76 u8 ape_opp;
77 u8 arm_status;
78 u8 ape_status;
79 } ack;
80} mb1_transfer;
81
82enum ack_mb5_status {
83 I2C_WR_OK = 0x01,
84 I2C_RD_OK = 0x02,
85};
86
87#define MBOX_BIT BIT
88#define NUM_MBOX 8
89
90static struct {
91 struct mutex lock;
92 struct completion work;
93 bool failed;
94 struct {
95 u8 status;
96 u8 value;
97 } ack;
98} mb5_transfer;
99
100/**
101 * prcmu_abb_read() - Read register value(s) from the ABB.
102 * @slave: The I2C slave address.
103 * @reg: The (start) register address.
104 * @value: The read out value(s).
105 * @size: The number of registers to read.
106 *
107 * Reads register value(s) from the ABB.
108 * @size has to be 1 for the current firmware version.
109 */
110int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
111{
112 int r;
113
114 if (size != 1)
115 return -EINVAL;
116
117 r = mutex_lock_interruptible(&mb5_transfer.lock);
118 if (r)
119 return r;
120
121 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
122 cpu_relax();
123
124 writeb(I2C_READ(slave), REQ_MB5_I2C_SLAVE_OP);
125 writeb(I2C_STOP_EN, REQ_MB5_I2C_HW_BITS);
126 writeb(reg, REQ_MB5_I2C_REG);
127
128 writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
129 if (!wait_for_completion_timeout(&mb5_transfer.work,
130 msecs_to_jiffies(500))) {
131 pr_err("prcmu: prcmu_abb_read timed out.\n");
132 r = -EIO;
133 goto unlock_and_return;
134 }
135 r = ((mb5_transfer.ack.status == I2C_RD_OK) ? 0 : -EIO);
136 if (!r)
137 *value = mb5_transfer.ack.value;
138
139unlock_and_return:
140 mutex_unlock(&mb5_transfer.lock);
141 return r;
142}
143EXPORT_SYMBOL(prcmu_abb_read);
144
145/**
146 * prcmu_abb_write() - Write register value(s) to the ABB.
147 * @slave: The I2C slave address.
148 * @reg: The (start) register address.
149 * @value: The value(s) to write.
150 * @size: The number of registers to write.
151 *
152 * Reads register value(s) from the ABB.
153 * @size has to be 1 for the current firmware version.
154 */
155int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
156{
157 int r;
158
159 if (size != 1)
160 return -EINVAL;
161
162 r = mutex_lock_interruptible(&mb5_transfer.lock);
163 if (r)
164 return r;
165
166
167 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
168 cpu_relax();
169
170 writeb(I2C_WRITE(slave), REQ_MB5_I2C_SLAVE_OP);
171 writeb(I2C_STOP_EN, REQ_MB5_I2C_HW_BITS);
172 writeb(reg, REQ_MB5_I2C_REG);
173 writeb(*value, REQ_MB5_I2C_VAL);
174
175 writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
176 if (!wait_for_completion_timeout(&mb5_transfer.work,
177 msecs_to_jiffies(500))) {
178 pr_err("prcmu: prcmu_abb_write timed out.\n");
179 r = -EIO;
180 goto unlock_and_return;
181 }
182 r = ((mb5_transfer.ack.status == I2C_WR_OK) ? 0 : -EIO);
183
184unlock_and_return:
185 mutex_unlock(&mb5_transfer.lock);
186 return r;
187}
188EXPORT_SYMBOL(prcmu_abb_write);
189
190static int set_ape_cpu_opps(u8 header, enum prcmu_ape_opp ape_opp,
191 enum prcmu_cpu_opp cpu_opp)
192{
193 bool do_ape;
194 bool do_arm;
195 int err = 0;
196
197 do_ape = ((header == MB1H_APE_OPP) || (header == MB1H_ARM_APE_OPP));
198 do_arm = ((header == MB1H_ARM_OPP) || (header == MB1H_ARM_APE_OPP));
199
200 mutex_lock(&mb1_transfer.lock);
201
202 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
203 cpu_relax();
204
205 writeb(0, MBOX_HEADER_REQ_MB0);
206 writeb(cpu_opp, REQ_MB1_ARMOPP);
207 writeb(ape_opp, REQ_MB1_APEOPP);
208 writeb(0, REQ_MB1_BOOSTOPP);
209 writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
210 wait_for_completion(&mb1_transfer.work);
211 if ((do_ape) && (mb1_transfer.ack.ape_status != 0))
212 err = -EIO;
213 if ((do_arm) && (mb1_transfer.ack.arm_status != 0))
214 err = -EIO;
215
216 mutex_unlock(&mb1_transfer.lock);
217
218 return err;
219}
220
221/**
222 * prcmu_set_ape_opp() - Set the OPP of the APE.
223 * @opp: The OPP to set.
224 *
225 * This function sets the OPP of the APE.
226 */
227int prcmu_set_ape_opp(enum prcmu_ape_opp opp)
228{
229 return set_ape_cpu_opps(MB1H_APE_OPP, opp, APE_OPP_NO_CHANGE);
230}
231EXPORT_SYMBOL(prcmu_set_ape_opp);
232
233/**
234 * prcmu_set_cpu_opp() - Set the OPP of the CPU.
235 * @opp: The OPP to set.
236 *
237 * This function sets the OPP of the CPU.
238 */
239int prcmu_set_cpu_opp(enum prcmu_cpu_opp opp)
240{
241 return set_ape_cpu_opps(MB1H_ARM_OPP, CPU_OPP_NO_CHANGE, opp);
242}
243EXPORT_SYMBOL(prcmu_set_cpu_opp);
244
245/**
246 * prcmu_set_ape_cpu_opps() - Set the OPPs of the APE and the CPU.
247 * @ape_opp: The APE OPP to set.
248 * @cpu_opp: The CPU OPP to set.
249 *
250 * This function sets the OPPs of the APE and the CPU.
251 */
252int prcmu_set_ape_cpu_opps(enum prcmu_ape_opp ape_opp,
253 enum prcmu_cpu_opp cpu_opp)
254{
255 return set_ape_cpu_opps(MB1H_ARM_APE_OPP, ape_opp, cpu_opp);
256}
257EXPORT_SYMBOL(prcmu_set_ape_cpu_opps);
258
259/**
260 * prcmu_get_ape_opp() - Get the OPP of the APE.
261 *
262 * This function gets the OPP of the APE.
263 */
264enum prcmu_ape_opp prcmu_get_ape_opp(void)
265{
266 return readb(ACK_MB1_CURR_APEOPP);
267}
268EXPORT_SYMBOL(prcmu_get_ape_opp);
269
270/**
271 * prcmu_get_cpu_opp() - Get the OPP of the CPU.
272 *
273 * This function gets the OPP of the CPU. The OPP is specified in %%.
274 * PRCMU_OPP_EXT is a special OPP value, not specified in %%.
275 */
276int prcmu_get_cpu_opp(void)
277{
278 return readb(ACK_MB1_CURR_ARMOPP);
279}
280EXPORT_SYMBOL(prcmu_get_cpu_opp);
281
282bool prcmu_has_arm_maxopp(void)
283{
284 return (readb(PRCM_AVS_VARM_MAX_OPP) & PRCM_AVS_ISMODEENABLE_MASK)
285 == PRCM_AVS_ISMODEENABLE_MASK;
286}
287
288static void read_mailbox_0(void)
289{
290 writel(MBOX_BIT(0), PRCM_ARM_IT1_CLEAR);
291}
292
293static void read_mailbox_1(void)
294{
295 mb1_transfer.ack.arm_opp = readb(ACK_MB1_CURR_ARMOPP);
296 mb1_transfer.ack.ape_opp = readb(ACK_MB1_CURR_APEOPP);
297 complete(&mb1_transfer.work);
298 writel(MBOX_BIT(1), PRCM_ARM_IT1_CLEAR);
299}
300
301static void read_mailbox_2(void)
302{
303 writel(MBOX_BIT(2), PRCM_ARM_IT1_CLEAR);
304}
305
306static void read_mailbox_3(void)
307{
308 writel(MBOX_BIT(3), PRCM_ARM_IT1_CLEAR);
309}
310
311static void read_mailbox_4(void)
312{
313 writel(MBOX_BIT(4), PRCM_ARM_IT1_CLEAR);
314}
315
316static void read_mailbox_5(void)
317{
318 mb5_transfer.ack.status = readb(ACK_MB5_I2C_STATUS);
319 mb5_transfer.ack.value = readb(ACK_MB5_I2C_VAL);
320 complete(&mb5_transfer.work);
321 writel(MBOX_BIT(5), PRCM_ARM_IT1_CLEAR);
322}
323
324static void read_mailbox_6(void)
325{
326 writel(MBOX_BIT(6), PRCM_ARM_IT1_CLEAR);
327}
328
329static void read_mailbox_7(void)
330{
331 writel(MBOX_BIT(7), PRCM_ARM_IT1_CLEAR);
332}
333
334static void (* const read_mailbox[NUM_MBOX])(void) = {
335 read_mailbox_0,
336 read_mailbox_1,
337 read_mailbox_2,
338 read_mailbox_3,
339 read_mailbox_4,
340 read_mailbox_5,
341 read_mailbox_6,
342 read_mailbox_7
343};
344
345static irqreturn_t prcmu_irq_handler(int irq, void *data)
346{
347 u32 bits;
348 u8 n;
349
350 bits = (readl(PRCM_ARM_IT1_VAL) & (MBOX_BIT(NUM_MBOX) - 1));
351 if (unlikely(!bits))
352 return IRQ_NONE;
353
354 for (n = 0; bits; n++) {
355 if (bits & MBOX_BIT(n)) {
356 bits -= MBOX_BIT(n);
357 read_mailbox[n]();
358 }
359 }
360 return IRQ_HANDLED;
361}
362
363void __init prcmu_early_init(void)
364{
365 if (cpu_is_u8500v11() || cpu_is_u8500ed()) {
366 tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE_V1);
367 } else if (cpu_is_u8500v2()) {
368 tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
369 } else {
370 pr_err("prcmu: Unsupported chip version\n");
371 BUG();
372 }
373}
374
375static int __init prcmu_init(void)
376{
377 if (cpu_is_u8500ed()) {
378 pr_err("prcmu: Unsupported chip version\n");
379 return 0;
380 }
381
382 mutex_init(&mb1_transfer.lock);
383 init_completion(&mb1_transfer.work);
384 mutex_init(&mb5_transfer.lock);
385 init_completion(&mb5_transfer.work);
386
387 /* Clean up the mailbox interrupts after pre-kernel code. */
388 writel((MBOX_BIT(NUM_MBOX) - 1), PRCM_ARM_IT1_CLEAR);
389
390 return request_irq(IRQ_DB8500_PRCMU1, prcmu_irq_handler, 0,
391 "prcmu", NULL);
392}
393
394arch_initcall(prcmu_init);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 76f82ae44efb..3f17ea146f0e 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -85,7 +85,7 @@ void show_mem(unsigned int filter)
85 struct meminfo * mi = &meminfo; 85 struct meminfo * mi = &meminfo;
86 86
87 printk("Mem-info:\n"); 87 printk("Mem-info:\n");
88 show_free_areas(); 88 show_free_areas(filter);
89 89
90 for_each_bank (i, mi) { 90 for_each_bank (i, mi) {
91 struct membank *bank = &mi->bank[i]; 91 struct membank *bank = &mi->bank[i];
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6cf76b3b68d1..08a92368d9d3 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -31,8 +31,6 @@
31 31
32#include "mm.h" 32#include "mm.h"
33 33
34DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
35
36/* 34/*
37 * empty_zero_page is a special page that is used for 35 * empty_zero_page is a special page that is used for
38 * zero-initialized data and COW. 36 * zero-initialized data and COW.
diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c
index a7314d44b17b..2798c2d4a1cf 100644
--- a/arch/avr32/mm/init.c
+++ b/arch/avr32/mm/init.c
@@ -25,8 +25,6 @@
25#include <asm/setup.h> 25#include <asm/setup.h>
26#include <asm/sections.h> 26#include <asm/sections.h>
27 27
28DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
29
30pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data; 28pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data;
31 29
32struct page *empty_zero_page; 30struct page *empty_zero_page;
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 8addb1220b4f..a18180f2d007 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -24,11 +24,13 @@ config BLACKFIN
24 select HAVE_FUNCTION_TRACER 24 select HAVE_FUNCTION_TRACER
25 select HAVE_FUNCTION_TRACE_MCOUNT_TEST 25 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
26 select HAVE_IDE 26 select HAVE_IDE
27 select HAVE_IRQ_WORK
27 select HAVE_KERNEL_GZIP if RAMKERNEL 28 select HAVE_KERNEL_GZIP if RAMKERNEL
28 select HAVE_KERNEL_BZIP2 if RAMKERNEL 29 select HAVE_KERNEL_BZIP2 if RAMKERNEL
29 select HAVE_KERNEL_LZMA if RAMKERNEL 30 select HAVE_KERNEL_LZMA if RAMKERNEL
30 select HAVE_KERNEL_LZO if RAMKERNEL 31 select HAVE_KERNEL_LZO if RAMKERNEL
31 select HAVE_OPROFILE 32 select HAVE_OPROFILE
33 select HAVE_PERF_EVENTS
32 select ARCH_WANT_OPTIONAL_GPIOLIB 34 select ARCH_WANT_OPTIONAL_GPIOLIB
33 select HAVE_GENERIC_HARDIRQS 35 select HAVE_GENERIC_HARDIRQS
34 select GENERIC_ATOMIC64 36 select GENERIC_ATOMIC64
diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug
index 2641731f24cd..e2a3d4c8ab9a 100644
--- a/arch/blackfin/Kconfig.debug
+++ b/arch/blackfin/Kconfig.debug
@@ -9,15 +9,6 @@ config DEBUG_STACKOVERFLOW
9 This option will cause messages to be printed if free stack space 9 This option will cause messages to be printed if free stack space
10 drops below a certain limit. 10 drops below a certain limit.
11 11
12config DEBUG_STACK_USAGE
13 bool "Enable stack utilization instrumentation"
14 depends on DEBUG_KERNEL
15 help
16 Enables the display of the minimum amount of free stack which each
17 task has ever had available in the sysrq-T output.
18
19 This option will slow down process creation somewhat.
20
21config DEBUG_VERBOSE 12config DEBUG_VERBOSE
22 bool "Verbose fault messages" 13 bool "Verbose fault messages"
23 default y 14 default y
@@ -32,7 +23,7 @@ config DEBUG_VERBOSE
32 Most people should say N here. 23 Most people should say N here.
33 24
34config DEBUG_MMRS 25config DEBUG_MMRS
35 bool "Generate Blackfin MMR tree" 26 tristate "Generate Blackfin MMR tree"
36 select DEBUG_FS 27 select DEBUG_FS
37 help 28 help
38 Create a tree of Blackfin MMRs via the debugfs tree. If 29 Create a tree of Blackfin MMRs via the debugfs tree. If
diff --git a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
index 95cf2ba9de17..8465b3e6b862 100644
--- a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
@@ -121,13 +121,11 @@ CONFIG_LOGO=y
121# CONFIG_LOGO_LINUX_VGA16 is not set 121# CONFIG_LOGO_LINUX_VGA16 is not set
122# CONFIG_LOGO_LINUX_CLUT224 is not set 122# CONFIG_LOGO_LINUX_CLUT224 is not set
123# CONFIG_LOGO_BLACKFIN_VGA16 is not set 123# CONFIG_LOGO_BLACKFIN_VGA16 is not set
124CONFIG_SOUND=m 124CONFIG_SOUND=y
125CONFIG_SND=m 125CONFIG_SND=y
126CONFIG_SND_SOC=m 126CONFIG_SND_SOC=y
127CONFIG_SND_BF5XX_I2S=m 127CONFIG_SND_BF5XX_I2S=y
128CONFIG_SND_BF5XX_SOC_SSM2602=m 128CONFIG_SND_BF5XX_SOC_SSM2602=y
129CONFIG_SND_BF5XX_AC97=m
130CONFIG_SND_BF5XX_SOC_AD1980=m
131CONFIG_HID_A4TECH=y 129CONFIG_HID_A4TECH=y
132CONFIG_HID_APPLE=y 130CONFIG_HID_APPLE=y
133CONFIG_HID_BELKIN=y 131CONFIG_HID_BELKIN=y
diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
index 8be8e33fac52..5e7321b26040 100644
--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
@@ -96,7 +96,7 @@ CONFIG_SERIAL_BFIN_UART1=y
96# CONFIG_HW_RANDOM is not set 96# CONFIG_HW_RANDOM is not set
97CONFIG_I2C=y 97CONFIG_I2C=y
98CONFIG_I2C_CHARDEV=m 98CONFIG_I2C_CHARDEV=m
99CONFIG_I2C_BLACKFIN_TWI=m 99CONFIG_I2C_BLACKFIN_TWI=y
100CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 100CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
101CONFIG_SPI=y 101CONFIG_SPI=y
102CONFIG_SPI_BFIN=y 102CONFIG_SPI_BFIN=y
@@ -115,13 +115,11 @@ CONFIG_LOGO=y
115# CONFIG_LOGO_LINUX_VGA16 is not set 115# CONFIG_LOGO_LINUX_VGA16 is not set
116# CONFIG_LOGO_LINUX_CLUT224 is not set 116# CONFIG_LOGO_LINUX_CLUT224 is not set
117# CONFIG_LOGO_BLACKFIN_VGA16 is not set 117# CONFIG_LOGO_BLACKFIN_VGA16 is not set
118CONFIG_SOUND=m 118CONFIG_SOUND=y
119CONFIG_SND=m 119CONFIG_SND=y
120CONFIG_SND_SOC=m 120CONFIG_SND_SOC=y
121CONFIG_SND_BF5XX_I2S=m 121CONFIG_SND_BF5XX_I2S=y
122CONFIG_SND_BF5XX_SOC_SSM2602=m 122CONFIG_SND_BF5XX_SOC_SSM2602=y
123CONFIG_SND_BF5XX_AC97=m
124CONFIG_SND_BF5XX_SOC_AD1980=m
125CONFIG_HID_A4TECH=y 123CONFIG_HID_A4TECH=y
126CONFIG_HID_APPLE=y 124CONFIG_HID_APPLE=y
127CONFIG_HID_BELKIN=y 125CONFIG_HID_BELKIN=y
diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
index 0aafde6c8c2d..b90d3792ed52 100644
--- a/arch/blackfin/configs/BF533-STAMP_defconfig
+++ b/arch/blackfin/configs/BF533-STAMP_defconfig
@@ -99,8 +99,6 @@ CONFIG_SND_PCM_OSS=m
99CONFIG_SND_SOC=m 99CONFIG_SND_SOC=m
100CONFIG_SND_BF5XX_I2S=m 100CONFIG_SND_BF5XX_I2S=m
101CONFIG_SND_BF5XX_SOC_AD73311=m 101CONFIG_SND_BF5XX_SOC_AD73311=m
102CONFIG_SND_BF5XX_AC97=m
103CONFIG_SND_BF5XX_SOC_AD1980=m
104# CONFIG_USB_SUPPORT is not set 102# CONFIG_USB_SUPPORT is not set
105CONFIG_RTC_CLASS=y 103CONFIG_RTC_CLASS=y
106CONFIG_RTC_DRV_BFIN=y 104CONFIG_RTC_DRV_BFIN=y
diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
index c9077fb58135..005362537a7b 100644
--- a/arch/blackfin/configs/BF537-STAMP_defconfig
+++ b/arch/blackfin/configs/BF537-STAMP_defconfig
@@ -110,8 +110,6 @@ CONFIG_SND_PCM_OSS=m
110CONFIG_SND_SOC=m 110CONFIG_SND_SOC=m
111CONFIG_SND_BF5XX_I2S=m 111CONFIG_SND_BF5XX_I2S=m
112CONFIG_SND_BF5XX_SOC_AD73311=m 112CONFIG_SND_BF5XX_SOC_AD73311=m
113CONFIG_SND_BF5XX_AC97=m
114CONFIG_SND_BF5XX_SOC_AD1980=m
115# CONFIG_USB_SUPPORT is not set 113# CONFIG_USB_SUPPORT is not set
116CONFIG_RTC_CLASS=y 114CONFIG_RTC_CLASS=y
117CONFIG_RTC_DRV_BFIN=y 115CONFIG_RTC_DRV_BFIN=y
diff --git a/arch/blackfin/include/asm/bfin-global.h b/arch/blackfin/include/asm/bfin-global.h
index 121cc04d877d..17bcbf60bcae 100644
--- a/arch/blackfin/include/asm/bfin-global.h
+++ b/arch/blackfin/include/asm/bfin-global.h
@@ -49,16 +49,6 @@ extern void dump_bfin_trace_buffer(void);
49#define dump_bfin_trace_buffer() 49#define dump_bfin_trace_buffer()
50#endif 50#endif
51 51
52/* init functions only */
53extern int init_arch_irq(void);
54extern void init_exception_vectors(void);
55extern void program_IAR(void);
56
57extern asmlinkage void lower_to_irq14(void);
58extern asmlinkage void bfin_return_from_exception(void);
59extern asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
60extern int bfin_internal_set_wake(unsigned int irq, unsigned int state);
61
62extern void *l1_data_A_sram_alloc(size_t); 52extern void *l1_data_A_sram_alloc(size_t);
63extern void *l1_data_B_sram_alloc(size_t); 53extern void *l1_data_B_sram_alloc(size_t);
64extern void *l1_inst_sram_alloc(size_t); 54extern void *l1_inst_sram_alloc(size_t);
diff --git a/arch/blackfin/include/asm/bfin_pfmon.h b/arch/blackfin/include/asm/bfin_pfmon.h
new file mode 100644
index 000000000000..accd47e2db40
--- /dev/null
+++ b/arch/blackfin/include/asm/bfin_pfmon.h
@@ -0,0 +1,44 @@
1/*
2 * Blackfin Performance Monitor definitions
3 *
4 * Copyright 2005-2011 Analog Devices Inc.
5 *
6 * Licensed under the ADI BSD license or GPL-2 (or later).
7 */
8
9#ifndef __ASM_BFIN_PFMON_H__
10#define __ASM_BFIN_PFMON_H__
11
12/* PFCTL Masks */
13#define PFMON_MASK 0xff
14#define PFCEN_MASK 0x3
15#define PFCEN_DISABLE 0x0
16#define PFCEN_ENABLE_USER 0x1
17#define PFCEN_ENABLE_SUPV 0x2
18#define PFCEN_ENABLE_ALL (PFCEN_ENABLE_USER | PFCEN_ENABLE_SUPV)
19
20#define PFPWR_P 0
21#define PEMUSW0_P 2
22#define PFCEN0_P 3
23#define PFMON0_P 5
24#define PEMUSW1_P 13
25#define PFCEN1_P 14
26#define PFMON1_P 16
27#define PFCNT0_P 24
28#define PFCNT1_P 25
29
30#define PFPWR (1 << PFPWR_P)
31#define PEMUSW(n, x) ((x) << ((n) ? PEMUSW1_P : PEMUSW0_P))
32#define PEMUSW0 PEMUSW(0, 1)
33#define PEMUSW1 PEMUSW(1, 1)
34#define PFCEN(n, x) ((x) << ((n) ? PFCEN1_P : PFCEN0_P))
35#define PFCEN0 PFCEN(0, PFCEN_MASK)
36#define PFCEN1 PFCEN(1, PFCEN_MASK)
37#define PFCNT(n, x) ((x) << ((n) ? PFCNT1_P : PFCNT0_P))
38#define PFCNT0 PFCNT(0, 1)
39#define PFCNT1 PFCNT(1, 1)
40#define PFMON(n, x) ((x) << ((n) ? PFMON1_P : PFMON0_P))
41#define PFMON0 PFMON(0, PFMON_MASK)
42#define PFMON1 PFMON(1, PFMON_MASK)
43
44#endif
diff --git a/arch/blackfin/include/asm/bfin_sport.h b/arch/blackfin/include/asm/bfin_sport.h
index d27600c262c2..f8568a31d0ab 100644
--- a/arch/blackfin/include/asm/bfin_sport.h
+++ b/arch/blackfin/include/asm/bfin_sport.h
@@ -100,6 +100,10 @@ struct sport_register {
100}; 100};
101#undef __BFP 101#undef __BFP
102 102
103struct bfin_snd_platform_data {
104 const unsigned short *pin_req;
105};
106
103#define bfin_read_sport_rx32(base) \ 107#define bfin_read_sport_rx32(base) \
104({ \ 108({ \
105 struct sport_register *__mmrs = (void *)base; \ 109 struct sport_register *__mmrs = (void *)base; \
diff --git a/arch/blackfin/include/asm/cacheflush.h b/arch/blackfin/include/asm/cacheflush.h
index 77135b62818e..9a5b2c572ebf 100644
--- a/arch/blackfin/include/asm/cacheflush.h
+++ b/arch/blackfin/include/asm/cacheflush.h
@@ -39,8 +39,13 @@ extern void blackfin_invalidate_entire_icache(void);
39 39
40static inline void flush_icache_range(unsigned start, unsigned end) 40static inline void flush_icache_range(unsigned start, unsigned end)
41{ 41{
42#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK) 42#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
43 blackfin_dcache_flush_range(start, end); 43 if (end <= physical_mem_end)
44 blackfin_dcache_flush_range(start, end);
45#endif
46#if defined(CONFIG_BFIN_L2_WRITEBACK)
47 if (start >= L2_START && end <= L2_START + L2_LENGTH)
48 blackfin_dcache_flush_range(start, end);
44#endif 49#endif
45 50
46 /* Make sure all write buffers in the data side of the core 51 /* Make sure all write buffers in the data side of the core
@@ -52,9 +57,17 @@ static inline void flush_icache_range(unsigned start, unsigned end)
52 * the pipeline. 57 * the pipeline.
53 */ 58 */
54 SSYNC(); 59 SSYNC();
55#if defined(CONFIG_BFIN_ICACHE) 60#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
56 blackfin_icache_flush_range(start, end); 61 if (end <= physical_mem_end) {
57 flush_icache_range_others(start, end); 62 blackfin_icache_flush_range(start, end);
63 flush_icache_range_others(start, end);
64 }
65#endif
66#if defined(CONFIG_BFIN_L2_ICACHEABLE)
67 if (start >= L2_START && end <= L2_START + L2_LENGTH) {
68 blackfin_icache_flush_range(start, end);
69 flush_icache_range_others(start, end);
70 }
58#endif 71#endif
59} 72}
60 73
diff --git a/arch/blackfin/include/asm/cpu.h b/arch/blackfin/include/asm/cpu.h
index 16883e582e3c..05043786da21 100644
--- a/arch/blackfin/include/asm/cpu.h
+++ b/arch/blackfin/include/asm/cpu.h
@@ -10,11 +10,8 @@
10 10
11#include <linux/percpu.h> 11#include <linux/percpu.h>
12 12
13struct task_struct;
14
15struct blackfin_cpudata { 13struct blackfin_cpudata {
16 struct cpu cpu; 14 struct cpu cpu;
17 struct task_struct *idle;
18 unsigned int imemctl; 15 unsigned int imemctl;
19 unsigned int dmemctl; 16 unsigned int dmemctl;
20}; 17};
diff --git a/arch/blackfin/include/asm/def_LPBlackfin.h b/arch/blackfin/include/asm/def_LPBlackfin.h
index 7600fe0696af..823679011457 100644
--- a/arch/blackfin/include/asm/def_LPBlackfin.h
+++ b/arch/blackfin/include/asm/def_LPBlackfin.h
@@ -52,10 +52,10 @@
52 52
53#define bfin_read(addr) \ 53#define bfin_read(addr) \
54({ \ 54({ \
55 sizeof(*(addr)) == 1 ? bfin_read8(addr) : \ 55 sizeof(*(addr)) == 1 ? bfin_read8(addr) : \
56 sizeof(*(addr)) == 2 ? bfin_read16(addr) : \ 56 sizeof(*(addr)) == 2 ? bfin_read16(addr) : \
57 sizeof(*(addr)) == 4 ? bfin_read32(addr) : \ 57 sizeof(*(addr)) == 4 ? bfin_read32(addr) : \
58 ({ BUG(); 0; }); \ 58 ({ BUG(); 0; }); \
59}) 59})
60#define bfin_write(addr, val) \ 60#define bfin_write(addr, val) \
61do { \ 61do { \
@@ -69,13 +69,13 @@ do { \
69 69
70#define bfin_write_or(addr, bits) \ 70#define bfin_write_or(addr, bits) \
71do { \ 71do { \
72 void *__addr = (void *)(addr); \ 72 typeof(addr) __addr = (addr); \
73 bfin_write(__addr, bfin_read(__addr) | (bits)); \ 73 bfin_write(__addr, bfin_read(__addr) | (bits)); \
74} while (0) 74} while (0)
75 75
76#define bfin_write_and(addr, bits) \ 76#define bfin_write_and(addr, bits) \
77do { \ 77do { \
78 void *__addr = (void *)(addr); \ 78 typeof(addr) __addr = (addr); \
79 bfin_write(__addr, bfin_read(__addr) & (bits)); \ 79 bfin_write(__addr, bfin_read(__addr) & (bits)); \
80} while (0) 80} while (0)
81 81
diff --git a/arch/blackfin/include/asm/irq_handler.h b/arch/blackfin/include/asm/irq_handler.h
index 7fbe42307b9a..ee73f79aef10 100644
--- a/arch/blackfin/include/asm/irq_handler.h
+++ b/arch/blackfin/include/asm/irq_handler.h
@@ -10,6 +10,16 @@
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/linkage.h> 11#include <linux/linkage.h>
12 12
13/* init functions only */
14extern int __init init_arch_irq(void);
15extern void init_exception_vectors(void);
16extern void __init program_IAR(void);
17#ifdef init_mach_irq
18extern void __init init_mach_irq(void);
19#else
20# define init_mach_irq()
21#endif
22
13/* BASE LEVEL interrupt handler routines */ 23/* BASE LEVEL interrupt handler routines */
14asmlinkage void evt_exception(void); 24asmlinkage void evt_exception(void);
15asmlinkage void trap(void); 25asmlinkage void trap(void);
@@ -37,4 +47,19 @@ extern void return_from_exception(void);
37extern int bfin_request_exception(unsigned int exception, void (*handler)(void)); 47extern int bfin_request_exception(unsigned int exception, void (*handler)(void));
38extern int bfin_free_exception(unsigned int exception, void (*handler)(void)); 48extern int bfin_free_exception(unsigned int exception, void (*handler)(void));
39 49
50extern asmlinkage void lower_to_irq14(void);
51extern asmlinkage void bfin_return_from_exception(void);
52extern asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
53extern int bfin_internal_set_wake(unsigned int irq, unsigned int state);
54
55struct irq_data;
56extern void bfin_handle_irq(unsigned irq);
57extern void bfin_ack_noop(struct irq_data *);
58extern void bfin_internal_mask_irq(unsigned int irq);
59extern void bfin_internal_unmask_irq(unsigned int irq);
60
61struct irq_desc;
62extern void bfin_demux_mac_status_irq(unsigned int, struct irq_desc *);
63extern void bfin_demux_gpio_irq(unsigned int, struct irq_desc *);
64
40#endif 65#endif
diff --git a/arch/blackfin/include/asm/kgdb.h b/arch/blackfin/include/asm/kgdb.h
index 8651afe12990..3ac0c72e9fee 100644
--- a/arch/blackfin/include/asm/kgdb.h
+++ b/arch/blackfin/include/asm/kgdb.h
@@ -103,7 +103,11 @@ static inline void arch_kgdb_breakpoint(void)
103 asm("EXCPT 2;"); 103 asm("EXCPT 2;");
104} 104}
105#define BREAK_INSTR_SIZE 2 105#define BREAK_INSTR_SIZE 2
106#define CACHE_FLUSH_IS_SAFE 1 106#ifdef CONFIG_SMP
107# define CACHE_FLUSH_IS_SAFE 0
108#else
109# define CACHE_FLUSH_IS_SAFE 1
110#endif
107#define HW_INST_WATCHPOINT_NUM 6 111#define HW_INST_WATCHPOINT_NUM 6
108#define HW_WATCHPOINT_NUM 8 112#define HW_WATCHPOINT_NUM 8
109#define TYPE_INST_WATCHPOINT 0 113#define TYPE_INST_WATCHPOINT 0
diff --git a/arch/blackfin/include/asm/perf_event.h b/arch/blackfin/include/asm/perf_event.h
new file mode 100644
index 000000000000..3d2b1716322f
--- /dev/null
+++ b/arch/blackfin/include/asm/perf_event.h
@@ -0,0 +1 @@
#define MAX_HWEVENTS 2
diff --git a/arch/blackfin/include/asm/ptrace.h b/arch/blackfin/include/asm/ptrace.h
index 832d7c009a2c..1066d63e62b5 100644
--- a/arch/blackfin/include/asm/ptrace.h
+++ b/arch/blackfin/include/asm/ptrace.h
@@ -108,8 +108,6 @@ struct pt_regs {
108extern void show_regs(struct pt_regs *); 108extern void show_regs(struct pt_regs *);
109 109
110#define arch_has_single_step() (1) 110#define arch_has_single_step() (1)
111extern void user_enable_single_step(struct task_struct *child);
112extern void user_disable_single_step(struct task_struct *child);
113/* common code demands this function */ 111/* common code demands this function */
114#define ptrace_disable(child) user_disable_single_step(child) 112#define ptrace_disable(child) user_disable_single_step(child)
115 113
diff --git a/arch/blackfin/include/mach-common/irq.h b/arch/blackfin/include/mach-common/irq.h
new file mode 100644
index 000000000000..cab14e911dc2
--- /dev/null
+++ b/arch/blackfin/include/mach-common/irq.h
@@ -0,0 +1,57 @@
1/*
2 * Common Blackfin IRQ definitions (i.e. the CEC)
3 *
4 * Copyright 2005-2011 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later
7 */
8
9#ifndef _MACH_COMMON_IRQ_H_
10#define _MACH_COMMON_IRQ_H_
11
12/*
13 * Core events interrupt source definitions
14 *
15 * Event Source Event Name
16 * Emulation EMU 0 (highest priority)
17 * Reset RST 1
18 * NMI NMI 2
19 * Exception EVX 3
20 * Reserved -- 4
21 * Hardware Error IVHW 5
22 * Core Timer IVTMR 6
23 * Peripherals IVG7 7
24 * Peripherals IVG8 8
25 * Peripherals IVG9 9
26 * Peripherals IVG10 10
27 * Peripherals IVG11 11
28 * Peripherals IVG12 12
29 * Peripherals IVG13 13
30 * Softirq IVG14 14
31 * System Call IVG15 15 (lowest priority)
32 */
33
34/* The ABSTRACT IRQ definitions */
35#define IRQ_EMU 0 /* Emulation */
36#define IRQ_RST 1 /* reset */
37#define IRQ_NMI 2 /* Non Maskable */
38#define IRQ_EVX 3 /* Exception */
39#define IRQ_UNUSED 4 /* - unused interrupt */
40#define IRQ_HWERR 5 /* Hardware Error */
41#define IRQ_CORETMR 6 /* Core timer */
42
43#define BFIN_IRQ(x) ((x) + 7)
44
45#define IVG7 7
46#define IVG8 8
47#define IVG9 9
48#define IVG10 10
49#define IVG11 11
50#define IVG12 12
51#define IVG13 13
52#define IVG14 14
53#define IVG15 15
54
55#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
56
57#endif
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index ca5ccc777772..d550b24d9e9b 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -33,7 +33,10 @@ obj-$(CONFIG_EARLY_PRINTK) += shadow_console.o
33obj-$(CONFIG_STACKTRACE) += stacktrace.o 33obj-$(CONFIG_STACKTRACE) += stacktrace.o
34obj-$(CONFIG_DEBUG_VERBOSE) += trace.o 34obj-$(CONFIG_DEBUG_VERBOSE) += trace.o
35obj-$(CONFIG_BFIN_PSEUDODBG_INSNS) += pseudodbg.o 35obj-$(CONFIG_BFIN_PSEUDODBG_INSNS) += pseudodbg.o
36obj-$(CONFIG_PERF_EVENTS) += perf_event.o
36 37
37# the kgdb test puts code into L2 and without linker 38# the kgdb test puts code into L2 and without linker
38# relaxation, we need to force long calls to/from it 39# relaxation, we need to force long calls to/from it
39CFLAGS_kgdb_test.o := -mlong-calls -O0 40CFLAGS_kgdb_test.o := -mlong-calls -O0
41
42obj-$(CONFIG_DEBUG_MMRS) += debug-mmrs.o
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 6ce8dce753c9..71dbaa4a48af 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -36,6 +36,11 @@ static int __init blackfin_dma_init(void)
36 36
37 printk(KERN_INFO "Blackfin DMA Controller\n"); 37 printk(KERN_INFO "Blackfin DMA Controller\n");
38 38
39
40#if ANOMALY_05000480
41 bfin_write_DMAC_TC_PER(0x0111);
42#endif
43
39 for (i = 0; i < MAX_DMA_CHANNELS; i++) { 44 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
40 atomic_set(&dma_ch[i].chan_status, 0); 45 atomic_set(&dma_ch[i].chan_status, 0);
41 dma_ch[i].regs = dma_io_base_addr[i]; 46 dma_ch[i].regs = dma_io_base_addr[i];
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index 170cf90735ba..bcf8cf6fe412 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -10,10 +10,12 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/err.h> 11#include <linux/err.h>
12#include <linux/proc_fs.h> 12#include <linux/proc_fs.h>
13#include <linux/seq_file.h>
13#include <asm/blackfin.h> 14#include <asm/blackfin.h>
14#include <asm/gpio.h> 15#include <asm/gpio.h>
15#include <asm/portmux.h> 16#include <asm/portmux.h>
16#include <linux/irq.h> 17#include <linux/irq.h>
18#include <asm/irq_handler.h>
17 19
18#if ANOMALY_05000311 || ANOMALY_05000323 20#if ANOMALY_05000311 || ANOMALY_05000323
19enum { 21enum {
@@ -534,7 +536,7 @@ static const unsigned int sic_iwr_irqs[] = {
534#if defined(BF533_FAMILY) 536#if defined(BF533_FAMILY)
535 IRQ_PROG_INTB 537 IRQ_PROG_INTB
536#elif defined(BF537_FAMILY) 538#elif defined(BF537_FAMILY)
537 IRQ_PROG_INTB, IRQ_PORTG_INTB, IRQ_MAC_TX 539 IRQ_PF_INTB_WATCH, IRQ_PORTG_INTB, IRQ_PH_INTB_MAC_TX
538#elif defined(BF538_FAMILY) 540#elif defined(BF538_FAMILY)
539 IRQ_PORTF_INTB 541 IRQ_PORTF_INTB
540#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x) 542#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
@@ -1203,35 +1205,43 @@ void bfin_reset_boot_spi_cs(unsigned short pin)
1203} 1205}
1204 1206
1205#if defined(CONFIG_PROC_FS) 1207#if defined(CONFIG_PROC_FS)
1206static int gpio_proc_read(char *buf, char **start, off_t offset, 1208static int gpio_proc_show(struct seq_file *m, void *v)
1207 int len, int *unused_i, void *unused_v)
1208{ 1209{
1209 int c, irq, gpio, outlen = 0; 1210 int c, irq, gpio;
1210 1211
1211 for (c = 0; c < MAX_RESOURCES; c++) { 1212 for (c = 0; c < MAX_RESOURCES; c++) {
1212 irq = is_reserved(gpio_irq, c, 1); 1213 irq = is_reserved(gpio_irq, c, 1);
1213 gpio = is_reserved(gpio, c, 1); 1214 gpio = is_reserved(gpio, c, 1);
1214 if (!check_gpio(c) && (gpio || irq)) 1215 if (!check_gpio(c) && (gpio || irq))
1215 len = sprintf(buf, "GPIO_%d: \t%s%s \t\tGPIO %s\n", c, 1216 seq_printf(m, "GPIO_%d: \t%s%s \t\tGPIO %s\n", c,
1216 get_label(c), (gpio && irq) ? " *" : "", 1217 get_label(c), (gpio && irq) ? " *" : "",
1217 get_gpio_dir(c) ? "OUTPUT" : "INPUT"); 1218 get_gpio_dir(c) ? "OUTPUT" : "INPUT");
1218 else if (is_reserved(peri, c, 1)) 1219 else if (is_reserved(peri, c, 1))
1219 len = sprintf(buf, "GPIO_%d: \t%s \t\tPeripheral\n", c, get_label(c)); 1220 seq_printf(m, "GPIO_%d: \t%s \t\tPeripheral\n", c, get_label(c));
1220 else 1221 else
1221 continue; 1222 continue;
1222 buf += len;
1223 outlen += len;
1224 } 1223 }
1225 return outlen; 1224
1225 return 0;
1226} 1226}
1227 1227
1228static int gpio_proc_open(struct inode *inode, struct file *file)
1229{
1230 return single_open(file, gpio_proc_show, NULL);
1231}
1232
1233static const struct file_operations gpio_proc_ops = {
1234 .open = gpio_proc_open,
1235 .read = seq_read,
1236 .llseek = seq_lseek,
1237 .release = single_release,
1238};
1239
1228static __init int gpio_register_proc(void) 1240static __init int gpio_register_proc(void)
1229{ 1241{
1230 struct proc_dir_entry *proc_gpio; 1242 struct proc_dir_entry *proc_gpio;
1231 1243
1232 proc_gpio = create_proc_entry("gpio", S_IRUGO, NULL); 1244 proc_gpio = proc_create("gpio", S_IRUGO, NULL, &gpio_proc_ops);
1233 if (proc_gpio)
1234 proc_gpio->read_proc = gpio_proc_read;
1235 return proc_gpio != NULL; 1245 return proc_gpio != NULL;
1236} 1246}
1237__initcall(gpio_register_proc); 1247__initcall(gpio_register_proc);
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index 2c264b51566a..c446591b961d 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -11,6 +11,7 @@
11 11
12#include <asm/cacheflush.h> 12#include <asm/cacheflush.h>
13#include <asm/io.h> 13#include <asm/io.h>
14#include <asm/irq_handler.h>
14 15
15/* Allow people to have their own Blackfin exception handler in a module */ 16/* Allow people to have their own Blackfin exception handler in a module */
16EXPORT_SYMBOL(bfin_return_from_exception); 17EXPORT_SYMBOL(bfin_return_from_exception);
diff --git a/arch/blackfin/kernel/debug-mmrs.c b/arch/blackfin/kernel/debug-mmrs.c
new file mode 100644
index 000000000000..94b1d8a0256a
--- /dev/null
+++ b/arch/blackfin/kernel/debug-mmrs.c
@@ -0,0 +1,1860 @@
1/*
2 * debugfs interface to core/system MMRs
3 *
4 * Copyright 2007-2011 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later
7 */
8
9#include <linux/debugfs.h>
10#include <linux/fs.h>
11#include <linux/kernel.h>
12#include <linux/module.h>
13
14#include <asm/blackfin.h>
15#include <asm/gpio.h>
16#include <asm/bfin_can.h>
17#include <asm/bfin_dma.h>
18#include <asm/bfin_ppi.h>
19#include <asm/bfin_serial.h>
20#include <asm/bfin5xx_spi.h>
21#include <asm/bfin_twi.h>
22
23/* Common code defines PORT_MUX on us, so redirect the MMR back locally */
24#ifdef BFIN_PORT_MUX
25#undef PORT_MUX
26#define PORT_MUX BFIN_PORT_MUX
27#endif
28
29#define _d(name, bits, addr, perms) debugfs_create_x##bits(name, perms, parent, (u##bits *)addr)
30#define d(name, bits, addr) _d(name, bits, addr, S_IRUSR|S_IWUSR)
31#define d_RO(name, bits, addr) _d(name, bits, addr, S_IRUSR)
32#define d_WO(name, bits, addr) _d(name, bits, addr, S_IWUSR)
33
34#define D_RO(name, bits) d_RO(#name, bits, name)
35#define D_WO(name, bits) d_WO(#name, bits, name)
36#define D32(name) d(#name, 32, name)
37#define D16(name) d(#name, 16, name)
38
39#define REGS_OFF(peri, mmr) offsetof(struct bfin_##peri##_regs, mmr)
40#define __REGS(peri, sname, rname) \
41 do { \
42 struct bfin_##peri##_regs r; \
43 void *addr = (void *)(base + REGS_OFF(peri, rname)); \
44 strcpy(_buf, sname); \
45 if (sizeof(r.rname) == 2) \
46 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, parent, addr); \
47 else \
48 debugfs_create_x32(buf, S_IRUSR|S_IWUSR, parent, addr); \
49 } while (0)
50#define REGS_STR_PFX(buf, pfx, num) \
51 ({ \
52 buf + (num >= 0 ? \
53 sprintf(buf, #pfx "%i_", num) : \
54 sprintf(buf, #pfx "_")); \
55 })
56#define REGS_STR_PFX_C(buf, pfx, num) \
57 ({ \
58 buf + (num >= 0 ? \
59 sprintf(buf, #pfx "%c_", 'A' + num) : \
60 sprintf(buf, #pfx "_")); \
61 })
62
63/*
64 * Core registers (not memory mapped)
65 */
66extern u32 last_seqstat;
67
68static int debug_cclk_get(void *data, u64 *val)
69{
70 *val = get_cclk();
71 return 0;
72}
73DEFINE_SIMPLE_ATTRIBUTE(fops_debug_cclk, debug_cclk_get, NULL, "0x%08llx\n");
74
75static int debug_sclk_get(void *data, u64 *val)
76{
77 *val = get_sclk();
78 return 0;
79}
80DEFINE_SIMPLE_ATTRIBUTE(fops_debug_sclk, debug_sclk_get, NULL, "0x%08llx\n");
81
82#define DEFINE_SYSREG(sr, pre, post) \
83static int sysreg_##sr##_get(void *data, u64 *val) \
84{ \
85 unsigned long tmp; \
86 pre; \
87 __asm__ __volatile__("%0 = " #sr ";" : "=d"(tmp)); \
88 *val = tmp; \
89 return 0; \
90} \
91static int sysreg_##sr##_set(void *data, u64 val) \
92{ \
93 unsigned long tmp = val; \
94 __asm__ __volatile__(#sr " = %0;" : : "d"(tmp)); \
95 post; \
96 return 0; \
97} \
98DEFINE_SIMPLE_ATTRIBUTE(fops_sysreg_##sr, sysreg_##sr##_get, sysreg_##sr##_set, "0x%08llx\n")
99
100DEFINE_SYSREG(cycles, , );
101DEFINE_SYSREG(cycles2, __asm__ __volatile__("%0 = cycles;" : "=d"(tmp)), );
102DEFINE_SYSREG(emudat, , );
103DEFINE_SYSREG(seqstat, , );
104DEFINE_SYSREG(syscfg, , CSYNC());
105#define D_SYSREG(sr) debugfs_create_file(#sr, S_IRUSR|S_IWUSR, parent, NULL, &fops_sysreg_##sr)
106
107/*
108 * CAN
109 */
110#define CAN_OFF(mmr) REGS_OFF(can, mmr)
111#define __CAN(uname, lname) __REGS(can, #uname, lname)
112static void __init __maybe_unused
113bfin_debug_mmrs_can(struct dentry *parent, unsigned long base, int num)
114{
115 static struct dentry *am, *mb;
116 int i, j;
117 char buf[32], *_buf = REGS_STR_PFX(buf, CAN, num);
118
119 if (!am) {
120 am = debugfs_create_dir("am", parent);
121 mb = debugfs_create_dir("mb", parent);
122 }
123
124 __CAN(MC1, mc1);
125 __CAN(MD1, md1);
126 __CAN(TRS1, trs1);
127 __CAN(TRR1, trr1);
128 __CAN(TA1, ta1);
129 __CAN(AA1, aa1);
130 __CAN(RMP1, rmp1);
131 __CAN(RML1, rml1);
132 __CAN(MBTIF1, mbtif1);
133 __CAN(MBRIF1, mbrif1);
134 __CAN(MBIM1, mbim1);
135 __CAN(RFH1, rfh1);
136 __CAN(OPSS1, opss1);
137
138 __CAN(MC2, mc2);
139 __CAN(MD2, md2);
140 __CAN(TRS2, trs2);
141 __CAN(TRR2, trr2);
142 __CAN(TA2, ta2);
143 __CAN(AA2, aa2);
144 __CAN(RMP2, rmp2);
145 __CAN(RML2, rml2);
146 __CAN(MBTIF2, mbtif2);
147 __CAN(MBRIF2, mbrif2);
148 __CAN(MBIM2, mbim2);
149 __CAN(RFH2, rfh2);
150 __CAN(OPSS2, opss2);
151
152 __CAN(CLOCK, clock);
153 __CAN(TIMING, timing);
154 __CAN(DEBUG, debug);
155 __CAN(STATUS, status);
156 __CAN(CEC, cec);
157 __CAN(GIS, gis);
158 __CAN(GIM, gim);
159 __CAN(GIF, gif);
160 __CAN(CONTROL, control);
161 __CAN(INTR, intr);
162 __CAN(VERSION, version);
163 __CAN(MBTD, mbtd);
164 __CAN(EWR, ewr);
165 __CAN(ESR, esr);
166 /*__CAN(UCREG, ucreg); no longer exists */
167 __CAN(UCCNT, uccnt);
168 __CAN(UCRC, ucrc);
169 __CAN(UCCNF, uccnf);
170 __CAN(VERSION2, version2);
171
172 for (i = 0; i < 32; ++i) {
173 sprintf(_buf, "AM%02iL", i);
174 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, am,
175 (u16 *)(base + CAN_OFF(msk[i].aml)));
176 sprintf(_buf, "AM%02iH", i);
177 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, am,
178 (u16 *)(base + CAN_OFF(msk[i].amh)));
179
180 for (j = 0; j < 3; ++j) {
181 sprintf(_buf, "MB%02i_DATA%i", i, j);
182 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb,
183 (u16 *)(base + CAN_OFF(chl[i].data[j*2])));
184 }
185 sprintf(_buf, "MB%02i_LENGTH", i);
186 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb,
187 (u16 *)(base + CAN_OFF(chl[i].dlc)));
188 sprintf(_buf, "MB%02i_TIMESTAMP", i);
189 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb,
190 (u16 *)(base + CAN_OFF(chl[i].tsv)));
191 sprintf(_buf, "MB%02i_ID0", i);
192 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb,
193 (u16 *)(base + CAN_OFF(chl[i].id0)));
194 sprintf(_buf, "MB%02i_ID1", i);
195 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb,
196 (u16 *)(base + CAN_OFF(chl[i].id1)));
197 }
198}
199#define CAN(num) bfin_debug_mmrs_can(parent, CAN##num##_MC1, num)
200
201/*
202 * DMA
203 */
204#define __DMA(uname, lname) __REGS(dma, #uname, lname)
205static void __init __maybe_unused
206bfin_debug_mmrs_dma(struct dentry *parent, unsigned long base, int num, char mdma, const char *pfx)
207{
208 char buf[32], *_buf;
209
210 if (mdma)
211 _buf = buf + sprintf(buf, "%s_%c%i_", pfx, mdma, num);
212 else
213 _buf = buf + sprintf(buf, "%s%i_", pfx, num);
214
215 __DMA(NEXT_DESC_PTR, next_desc_ptr);
216 __DMA(START_ADDR, start_addr);
217 __DMA(CONFIG, config);
218 __DMA(X_COUNT, x_count);
219 __DMA(X_MODIFY, x_modify);
220 __DMA(Y_COUNT, y_count);
221 __DMA(Y_MODIFY, y_modify);
222 __DMA(CURR_DESC_PTR, curr_desc_ptr);
223 __DMA(CURR_ADDR, curr_addr);
224 __DMA(IRQ_STATUS, irq_status);
225 __DMA(PERIPHERAL_MAP, peripheral_map);
226 __DMA(CURR_X_COUNT, curr_x_count);
227 __DMA(CURR_Y_COUNT, curr_y_count);
228}
229#define _DMA(num, base, mdma, pfx) bfin_debug_mmrs_dma(parent, base, num, mdma, pfx "DMA")
230#define DMA(num) _DMA(num, DMA##num##_NEXT_DESC_PTR, 0, "")
231#define _MDMA(num, x) \
232 do { \
233 _DMA(num, x##DMA_D##num##_CONFIG, 'D', #x); \
234 _DMA(num, x##DMA_S##num##_CONFIG, 'S', #x); \
235 } while (0)
236#define MDMA(num) _MDMA(num, M)
237#define IMDMA(num) _MDMA(num, IM)
238
239/*
240 * EPPI
241 */
242#define __EPPI(uname, lname) __REGS(eppi, #uname, lname)
243static void __init __maybe_unused
244bfin_debug_mmrs_eppi(struct dentry *parent, unsigned long base, int num)
245{
246 char buf[32], *_buf = REGS_STR_PFX(buf, EPPI, num);
247 __EPPI(STATUS, status);
248 __EPPI(HCOUNT, hcount);
249 __EPPI(HDELAY, hdelay);
250 __EPPI(VCOUNT, vcount);
251 __EPPI(VDELAY, vdelay);
252 __EPPI(FRAME, frame);
253 __EPPI(LINE, line);
254 __EPPI(CLKDIV, clkdiv);
255 __EPPI(CONTROL, control);
256 __EPPI(FS1W_HBL, fs1w_hbl);
257 __EPPI(FS1P_AVPL, fs1p_avpl);
258 __EPPI(FS2W_LVB, fs2w_lvb);
259 __EPPI(FS2P_LAVF, fs2p_lavf);
260 __EPPI(CLIP, clip);
261}
262#define EPPI(num) bfin_debug_mmrs_eppi(parent, EPPI##num##_STATUS, num)
263
264/*
265 * General Purpose Timers
266 */
267#define GPTIMER_OFF(mmr) (TIMER0_##mmr - TIMER0_CONFIG)
268#define __GPTIMER(name) \
269 do { \
270 strcpy(_buf, #name); \
271 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, parent, (u16 *)(base + GPTIMER_OFF(name))); \
272 } while (0)
273static void __init __maybe_unused
274bfin_debug_mmrs_gptimer(struct dentry *parent, unsigned long base, int num)
275{
276 char buf[32], *_buf = REGS_STR_PFX(buf, TIMER, num);
277 __GPTIMER(CONFIG);
278 __GPTIMER(COUNTER);
279 __GPTIMER(PERIOD);
280 __GPTIMER(WIDTH);
281}
282#define GPTIMER(num) bfin_debug_mmrs_gptimer(parent, TIMER##num##_CONFIG, num)
283
284/*
285 * Handshake MDMA
286 */
287#define __HMDMA(uname, lname) __REGS(hmdma, #uname, lname)
288static void __init __maybe_unused
289bfin_debug_mmrs_hmdma(struct dentry *parent, unsigned long base, int num)
290{
291 char buf[32], *_buf = REGS_STR_PFX(buf, HMDMA, num);
292 __HMDMA(CONTROL, control);
293 __HMDMA(ECINIT, ecinit);
294 __HMDMA(BCINIT, bcinit);
295 __HMDMA(ECURGENT, ecurgent);
296 __HMDMA(ECOVERFLOW, ecoverflow);
297 __HMDMA(ECOUNT, ecount);
298 __HMDMA(BCOUNT, bcount);
299}
300#define HMDMA(num) bfin_debug_mmrs_hmdma(parent, HMDMA##num##_CONTROL, num)
301
302/*
303 * Port/GPIO
304 */
305#define bfin_gpio_regs gpio_port_t
306#define __PORT(uname, lname) __REGS(gpio, #uname, lname)
307static void __init __maybe_unused
308bfin_debug_mmrs_port(struct dentry *parent, unsigned long base, int num)
309{
310 char buf[32], *_buf;
311#ifdef __ADSPBF54x__
312 _buf = REGS_STR_PFX_C(buf, PORT, num);
313 __PORT(FER, port_fer);
314 __PORT(SET, data_set);
315 __PORT(CLEAR, data_clear);
316 __PORT(DIR_SET, dir_set);
317 __PORT(DIR_CLEAR, dir_clear);
318 __PORT(INEN, inen);
319 __PORT(MUX, port_mux);
320#else
321 _buf = buf + sprintf(buf, "PORT%cIO_", num);
322 __PORT(CLEAR, data_clear);
323 __PORT(SET, data_set);
324 __PORT(TOGGLE, toggle);
325 __PORT(MASKA, maska);
326 __PORT(MASKA_CLEAR, maska_clear);
327 __PORT(MASKA_SET, maska_set);
328 __PORT(MASKA_TOGGLE, maska_toggle);
329 __PORT(MASKB, maskb);
330 __PORT(MASKB_CLEAR, maskb_clear);
331 __PORT(MASKB_SET, maskb_set);
332 __PORT(MASKB_TOGGLE, maskb_toggle);
333 __PORT(DIR, dir);
334 __PORT(POLAR, polar);
335 __PORT(EDGE, edge);
336 __PORT(BOTH, both);
337 __PORT(INEN, inen);
338#endif
339 _buf[-1] = '\0';
340 d(buf, 16, base + REGS_OFF(gpio, data));
341}
342#define PORT(base, num) bfin_debug_mmrs_port(parent, base, num)
343
344/*
345 * PPI
346 */
347#define __PPI(uname, lname) __REGS(ppi, #uname, lname)
348static void __init __maybe_unused
349bfin_debug_mmrs_ppi(struct dentry *parent, unsigned long base, int num)
350{
351 char buf[32], *_buf = REGS_STR_PFX(buf, PPI, num);
352 __PPI(CONTROL, control);
353 __PPI(STATUS, status);
354 __PPI(COUNT, count);
355 __PPI(DELAY, delay);
356 __PPI(FRAME, frame);
357}
358#define PPI(num) bfin_debug_mmrs_ppi(parent, PPI##num##_STATUS, num)
359
360/*
361 * SPI
362 */
363#define __SPI(uname, lname) __REGS(spi, #uname, lname)
364static void __init __maybe_unused
365bfin_debug_mmrs_spi(struct dentry *parent, unsigned long base, int num)
366{
367 char buf[32], *_buf = REGS_STR_PFX(buf, SPI, num);
368 __SPI(CTL, ctl);
369 __SPI(FLG, flg);
370 __SPI(STAT, stat);
371 __SPI(TDBR, tdbr);
372 __SPI(RDBR, rdbr);
373 __SPI(BAUD, baud);
374 __SPI(SHADOW, shadow);
375}
376#define SPI(num) bfin_debug_mmrs_spi(parent, SPI##num##_REGBASE, num)
377
378/*
379 * SPORT
380 */
381static inline int sport_width(void *mmr)
382{
383 unsigned long lmmr = (unsigned long)mmr;
384 if ((lmmr & 0xff) == 0x10)
385 /* SPORT#_TX has 0x10 offset -> SPORT#_TCR2 has 0x04 offset */
386 lmmr -= 0xc;
387 else
388 /* SPORT#_RX has 0x18 offset -> SPORT#_RCR2 has 0x24 offset */
389 lmmr += 0xc;
390 /* extract SLEN field from control register 2 and add 1 */
391 return (bfin_read16(lmmr) & 0x1f) + 1;
392}
393static int sport_set(void *mmr, u64 val)
394{
395 unsigned long flags;
396 local_irq_save(flags);
397 if (sport_width(mmr) <= 16)
398 bfin_write16(mmr, val);
399 else
400 bfin_write32(mmr, val);
401 local_irq_restore(flags);
402 return 0;
403}
404static int sport_get(void *mmr, u64 *val)
405{
406 unsigned long flags;
407 local_irq_save(flags);
408 if (sport_width(mmr) <= 16)
409 *val = bfin_read16(mmr);
410 else
411 *val = bfin_read32(mmr);
412 local_irq_restore(flags);
413 return 0;
414}
415DEFINE_SIMPLE_ATTRIBUTE(fops_sport, sport_get, sport_set, "0x%08llx\n");
416/*DEFINE_SIMPLE_ATTRIBUTE(fops_sport_ro, sport_get, NULL, "0x%08llx\n");*/
417DEFINE_SIMPLE_ATTRIBUTE(fops_sport_wo, NULL, sport_set, "0x%08llx\n");
418#define SPORT_OFF(mmr) (SPORT0_##mmr - SPORT0_TCR1)
419#define _D_SPORT(name, perms, fops) \
420 do { \
421 strcpy(_buf, #name); \
422 debugfs_create_file(buf, perms, parent, (void *)(base + SPORT_OFF(name)), fops); \
423 } while (0)
424#define __SPORT_RW(name) _D_SPORT(name, S_IRUSR|S_IWUSR, &fops_sport)
425#define __SPORT_RO(name) _D_SPORT(name, S_IRUSR, &fops_sport_ro)
426#define __SPORT_WO(name) _D_SPORT(name, S_IWUSR, &fops_sport_wo)
427#define __SPORT(name, bits) \
428 do { \
429 strcpy(_buf, #name); \
430 debugfs_create_x##bits(buf, S_IRUSR|S_IWUSR, parent, (u##bits *)(base + SPORT_OFF(name))); \
431 } while (0)
432static void __init __maybe_unused
433bfin_debug_mmrs_sport(struct dentry *parent, unsigned long base, int num)
434{
435 char buf[32], *_buf = REGS_STR_PFX(buf, SPORT, num);
436 __SPORT(CHNL, 16);
437 __SPORT(MCMC1, 16);
438 __SPORT(MCMC2, 16);
439 __SPORT(MRCS0, 32);
440 __SPORT(MRCS1, 32);
441 __SPORT(MRCS2, 32);
442 __SPORT(MRCS3, 32);
443 __SPORT(MTCS0, 32);
444 __SPORT(MTCS1, 32);
445 __SPORT(MTCS2, 32);
446 __SPORT(MTCS3, 32);
447 __SPORT(RCLKDIV, 16);
448 __SPORT(RCR1, 16);
449 __SPORT(RCR2, 16);
450 __SPORT(RFSDIV, 16);
451 __SPORT_RW(RX);
452 __SPORT(STAT, 16);
453 __SPORT(TCLKDIV, 16);
454 __SPORT(TCR1, 16);
455 __SPORT(TCR2, 16);
456 __SPORT(TFSDIV, 16);
457 __SPORT_WO(TX);
458}
459#define SPORT(num) bfin_debug_mmrs_sport(parent, SPORT##num##_TCR1, num)
460
461/*
462 * TWI
463 */
464#define __TWI(uname, lname) __REGS(twi, #uname, lname)
465static void __init __maybe_unused
466bfin_debug_mmrs_twi(struct dentry *parent, unsigned long base, int num)
467{
468 char buf[32], *_buf = REGS_STR_PFX(buf, TWI, num);
469 __TWI(CLKDIV, clkdiv);
470 __TWI(CONTROL, control);
471 __TWI(SLAVE_CTL, slave_ctl);
472 __TWI(SLAVE_STAT, slave_stat);
473 __TWI(SLAVE_ADDR, slave_addr);
474 __TWI(MASTER_CTL, master_ctl);
475 __TWI(MASTER_STAT, master_stat);
476 __TWI(MASTER_ADDR, master_addr);
477 __TWI(INT_STAT, int_stat);
478 __TWI(INT_MASK, int_mask);
479 __TWI(FIFO_CTL, fifo_ctl);
480 __TWI(FIFO_STAT, fifo_stat);
481 __TWI(XMT_DATA8, xmt_data8);
482 __TWI(XMT_DATA16, xmt_data16);
483 __TWI(RCV_DATA8, rcv_data8);
484 __TWI(RCV_DATA16, rcv_data16);
485}
486#define TWI(num) bfin_debug_mmrs_twi(parent, TWI##num##_CLKDIV, num)
487
488/*
489 * UART
490 */
491#define __UART(uname, lname) __REGS(uart, #uname, lname)
492static void __init __maybe_unused
493bfin_debug_mmrs_uart(struct dentry *parent, unsigned long base, int num)
494{
495 char buf[32], *_buf = REGS_STR_PFX(buf, UART, num);
496#ifdef BFIN_UART_BF54X_STYLE
497 __UART(DLL, dll);
498 __UART(DLH, dlh);
499 __UART(GCTL, gctl);
500 __UART(LCR, lcr);
501 __UART(MCR, mcr);
502 __UART(LSR, lsr);
503 __UART(MSR, msr);
504 __UART(SCR, scr);
505 __UART(IER_SET, ier_set);
506 __UART(IER_CLEAR, ier_clear);
507 __UART(THR, thr);
508 __UART(RBR, rbr);
509#else
510 __UART(DLL, dll);
511 __UART(THR, thr);
512 __UART(RBR, rbr);
513 __UART(DLH, dlh);
514 __UART(IER, ier);
515 __UART(IIR, iir);
516 __UART(LCR, lcr);
517 __UART(MCR, mcr);
518 __UART(LSR, lsr);
519 __UART(MSR, msr);
520 __UART(SCR, scr);
521 __UART(GCTL, gctl);
522#endif
523}
524#define UART(num) bfin_debug_mmrs_uart(parent, UART##num##_DLL, num)
525
526/*
527 * The actual debugfs generation
528 */
529static struct dentry *debug_mmrs_dentry;
530
531static int __init bfin_debug_mmrs_init(void)
532{
533 struct dentry *top, *parent;
534
535 pr_info("debug-mmrs: setting up Blackfin MMR debugfs\n");
536
537 top = debugfs_create_dir("blackfin", NULL);
538 if (top == NULL)
539 return -1;
540
541 parent = debugfs_create_dir("core_regs", top);
542 debugfs_create_file("cclk", S_IRUSR, parent, NULL, &fops_debug_cclk);
543 debugfs_create_file("sclk", S_IRUSR, parent, NULL, &fops_debug_sclk);
544 debugfs_create_x32("last_seqstat", S_IRUSR, parent, &last_seqstat);
545 D_SYSREG(cycles);
546 D_SYSREG(cycles2);
547 D_SYSREG(emudat);
548 D_SYSREG(seqstat);
549 D_SYSREG(syscfg);
550
551 /* Core MMRs */
552 parent = debugfs_create_dir("ctimer", top);
553 D32(TCNTL);
554 D32(TCOUNT);
555 D32(TPERIOD);
556 D32(TSCALE);
557
558 parent = debugfs_create_dir("cec", top);
559 D32(EVT0);
560 D32(EVT1);
561 D32(EVT2);
562 D32(EVT3);
563 D32(EVT4);
564 D32(EVT5);
565 D32(EVT6);
566 D32(EVT7);
567 D32(EVT8);
568 D32(EVT9);
569 D32(EVT10);
570 D32(EVT11);
571 D32(EVT12);
572 D32(EVT13);
573 D32(EVT14);
574 D32(EVT15);
575 D32(EVT_OVERRIDE);
576 D32(IMASK);
577 D32(IPEND);
578 D32(ILAT);
579 D32(IPRIO);
580
581 parent = debugfs_create_dir("debug", top);
582 D32(DBGSTAT);
583 D32(DSPID);
584
585 parent = debugfs_create_dir("mmu", top);
586 D32(SRAM_BASE_ADDRESS);
587 D32(DCPLB_ADDR0);
588 D32(DCPLB_ADDR10);
589 D32(DCPLB_ADDR11);
590 D32(DCPLB_ADDR12);
591 D32(DCPLB_ADDR13);
592 D32(DCPLB_ADDR14);
593 D32(DCPLB_ADDR15);
594 D32(DCPLB_ADDR1);
595 D32(DCPLB_ADDR2);
596 D32(DCPLB_ADDR3);
597 D32(DCPLB_ADDR4);
598 D32(DCPLB_ADDR5);
599 D32(DCPLB_ADDR6);
600 D32(DCPLB_ADDR7);
601 D32(DCPLB_ADDR8);
602 D32(DCPLB_ADDR9);
603 D32(DCPLB_DATA0);
604 D32(DCPLB_DATA10);
605 D32(DCPLB_DATA11);
606 D32(DCPLB_DATA12);
607 D32(DCPLB_DATA13);
608 D32(DCPLB_DATA14);
609 D32(DCPLB_DATA15);
610 D32(DCPLB_DATA1);
611 D32(DCPLB_DATA2);
612 D32(DCPLB_DATA3);
613 D32(DCPLB_DATA4);
614 D32(DCPLB_DATA5);
615 D32(DCPLB_DATA6);
616 D32(DCPLB_DATA7);
617 D32(DCPLB_DATA8);
618 D32(DCPLB_DATA9);
619 D32(DCPLB_FAULT_ADDR);
620 D32(DCPLB_STATUS);
621 D32(DMEM_CONTROL);
622 D32(DTEST_COMMAND);
623 D32(DTEST_DATA0);
624 D32(DTEST_DATA1);
625
626 D32(ICPLB_ADDR0);
627 D32(ICPLB_ADDR1);
628 D32(ICPLB_ADDR2);
629 D32(ICPLB_ADDR3);
630 D32(ICPLB_ADDR4);
631 D32(ICPLB_ADDR5);
632 D32(ICPLB_ADDR6);
633 D32(ICPLB_ADDR7);
634 D32(ICPLB_ADDR8);
635 D32(ICPLB_ADDR9);
636 D32(ICPLB_ADDR10);
637 D32(ICPLB_ADDR11);
638 D32(ICPLB_ADDR12);
639 D32(ICPLB_ADDR13);
640 D32(ICPLB_ADDR14);
641 D32(ICPLB_ADDR15);
642 D32(ICPLB_DATA0);
643 D32(ICPLB_DATA1);
644 D32(ICPLB_DATA2);
645 D32(ICPLB_DATA3);
646 D32(ICPLB_DATA4);
647 D32(ICPLB_DATA5);
648 D32(ICPLB_DATA6);
649 D32(ICPLB_DATA7);
650 D32(ICPLB_DATA8);
651 D32(ICPLB_DATA9);
652 D32(ICPLB_DATA10);
653 D32(ICPLB_DATA11);
654 D32(ICPLB_DATA12);
655 D32(ICPLB_DATA13);
656 D32(ICPLB_DATA14);
657 D32(ICPLB_DATA15);
658 D32(ICPLB_FAULT_ADDR);
659 D32(ICPLB_STATUS);
660 D32(IMEM_CONTROL);
661 if (!ANOMALY_05000481) {
662 D32(ITEST_COMMAND);
663 D32(ITEST_DATA0);
664 D32(ITEST_DATA1);
665 }
666
667 parent = debugfs_create_dir("perf", top);
668 D32(PFCNTR0);
669 D32(PFCNTR1);
670 D32(PFCTL);
671
672 parent = debugfs_create_dir("trace", top);
673 D32(TBUF);
674 D32(TBUFCTL);
675 D32(TBUFSTAT);
676
677 parent = debugfs_create_dir("watchpoint", top);
678 D32(WPIACTL);
679 D32(WPIA0);
680 D32(WPIA1);
681 D32(WPIA2);
682 D32(WPIA3);
683 D32(WPIA4);
684 D32(WPIA5);
685 D32(WPIACNT0);
686 D32(WPIACNT1);
687 D32(WPIACNT2);
688 D32(WPIACNT3);
689 D32(WPIACNT4);
690 D32(WPIACNT5);
691 D32(WPDACTL);
692 D32(WPDA0);
693 D32(WPDA1);
694 D32(WPDACNT0);
695 D32(WPDACNT1);
696 D32(WPSTAT);
697
698 /* System MMRs */
699#ifdef ATAPI_CONTROL
700 parent = debugfs_create_dir("atapi", top);
701 D16(ATAPI_CONTROL);
702 D16(ATAPI_DEV_ADDR);
703 D16(ATAPI_DEV_RXBUF);
704 D16(ATAPI_DEV_TXBUF);
705 D16(ATAPI_DMA_TFRCNT);
706 D16(ATAPI_INT_MASK);
707 D16(ATAPI_INT_STATUS);
708 D16(ATAPI_LINE_STATUS);
709 D16(ATAPI_MULTI_TIM_0);
710 D16(ATAPI_MULTI_TIM_1);
711 D16(ATAPI_MULTI_TIM_2);
712 D16(ATAPI_PIO_TFRCNT);
713 D16(ATAPI_PIO_TIM_0);
714 D16(ATAPI_PIO_TIM_1);
715 D16(ATAPI_REG_TIM_0);
716 D16(ATAPI_SM_STATE);
717 D16(ATAPI_STATUS);
718 D16(ATAPI_TERMINATE);
719 D16(ATAPI_UDMAOUT_TFRCNT);
720 D16(ATAPI_ULTRA_TIM_0);
721 D16(ATAPI_ULTRA_TIM_1);
722 D16(ATAPI_ULTRA_TIM_2);
723 D16(ATAPI_ULTRA_TIM_3);
724 D16(ATAPI_UMAIN_TFRCNT);
725 D16(ATAPI_XFER_LEN);
726#endif
727
728#if defined(CAN_MC1) || defined(CAN0_MC1) || defined(CAN1_MC1)
729 parent = debugfs_create_dir("can", top);
730# ifdef CAN_MC1
731 bfin_debug_mmrs_can(parent, CAN_MC1, -1);
732# endif
733# ifdef CAN0_MC1
734 CAN(0);
735# endif
736# ifdef CAN1_MC1
737 CAN(1);
738# endif
739#endif
740
741#ifdef CNT_COMMAND
742 parent = debugfs_create_dir("counter", top);
743 D16(CNT_COMMAND);
744 D16(CNT_CONFIG);
745 D32(CNT_COUNTER);
746 D16(CNT_DEBOUNCE);
747 D16(CNT_IMASK);
748 D32(CNT_MAX);
749 D32(CNT_MIN);
750 D16(CNT_STATUS);
751#endif
752
753 parent = debugfs_create_dir("dmac", top);
754#ifdef DMA_TC_CNT
755 D16(DMAC_TC_CNT);
756 D16(DMAC_TC_PER);
757#endif
758#ifdef DMAC0_TC_CNT
759 D16(DMAC0_TC_CNT);
760 D16(DMAC0_TC_PER);
761#endif
762#ifdef DMAC1_TC_CNT
763 D16(DMAC1_TC_CNT);
764 D16(DMAC1_TC_PER);
765#endif
766#ifdef DMAC1_PERIMUX
767 D16(DMAC1_PERIMUX);
768#endif
769
770#ifdef __ADSPBF561__
771 /* XXX: should rewrite the MMR map */
772# define DMA0_NEXT_DESC_PTR DMA2_0_NEXT_DESC_PTR
773# define DMA1_NEXT_DESC_PTR DMA2_1_NEXT_DESC_PTR
774# define DMA2_NEXT_DESC_PTR DMA2_2_NEXT_DESC_PTR
775# define DMA3_NEXT_DESC_PTR DMA2_3_NEXT_DESC_PTR
776# define DMA4_NEXT_DESC_PTR DMA2_4_NEXT_DESC_PTR
777# define DMA5_NEXT_DESC_PTR DMA2_5_NEXT_DESC_PTR
778# define DMA6_NEXT_DESC_PTR DMA2_6_NEXT_DESC_PTR
779# define DMA7_NEXT_DESC_PTR DMA2_7_NEXT_DESC_PTR
780# define DMA8_NEXT_DESC_PTR DMA2_8_NEXT_DESC_PTR
781# define DMA9_NEXT_DESC_PTR DMA2_9_NEXT_DESC_PTR
782# define DMA10_NEXT_DESC_PTR DMA2_10_NEXT_DESC_PTR
783# define DMA11_NEXT_DESC_PTR DMA2_11_NEXT_DESC_PTR
784# define DMA12_NEXT_DESC_PTR DMA1_0_NEXT_DESC_PTR
785# define DMA13_NEXT_DESC_PTR DMA1_1_NEXT_DESC_PTR
786# define DMA14_NEXT_DESC_PTR DMA1_2_NEXT_DESC_PTR
787# define DMA15_NEXT_DESC_PTR DMA1_3_NEXT_DESC_PTR
788# define DMA16_NEXT_DESC_PTR DMA1_4_NEXT_DESC_PTR
789# define DMA17_NEXT_DESC_PTR DMA1_5_NEXT_DESC_PTR
790# define DMA18_NEXT_DESC_PTR DMA1_6_NEXT_DESC_PTR
791# define DMA19_NEXT_DESC_PTR DMA1_7_NEXT_DESC_PTR
792# define DMA20_NEXT_DESC_PTR DMA1_8_NEXT_DESC_PTR
793# define DMA21_NEXT_DESC_PTR DMA1_9_NEXT_DESC_PTR
794# define DMA22_NEXT_DESC_PTR DMA1_10_NEXT_DESC_PTR
795# define DMA23_NEXT_DESC_PTR DMA1_11_NEXT_DESC_PTR
796#endif
797 parent = debugfs_create_dir("dma", top);
798 DMA(0);
799 DMA(1);
800 DMA(1);
801 DMA(2);
802 DMA(3);
803 DMA(4);
804 DMA(5);
805 DMA(6);
806 DMA(7);
807#ifdef DMA8_NEXT_DESC_PTR
808 DMA(8);
809 DMA(9);
810 DMA(10);
811 DMA(11);
812#endif
813#ifdef DMA12_NEXT_DESC_PTR
814 DMA(12);
815 DMA(13);
816 DMA(14);
817 DMA(15);
818 DMA(16);
819 DMA(17);
820 DMA(18);
821 DMA(19);
822#endif
823#ifdef DMA20_NEXT_DESC_PTR
824 DMA(20);
825 DMA(21);
826 DMA(22);
827 DMA(23);
828#endif
829
830 parent = debugfs_create_dir("ebiu_amc", top);
831 D32(EBIU_AMBCTL0);
832 D32(EBIU_AMBCTL1);
833 D16(EBIU_AMGCTL);
834#ifdef EBIU_MBSCTL
835 D16(EBIU_MBSCTL);
836 D32(EBIU_ARBSTAT);
837 D32(EBIU_MODE);
838 D16(EBIU_FCTL);
839#endif
840
841#ifdef EBIU_SDGCTL
842 parent = debugfs_create_dir("ebiu_sdram", top);
843# ifdef __ADSPBF561__
844 D32(EBIU_SDBCTL);
845# else
846 D16(EBIU_SDBCTL);
847# endif
848 D32(EBIU_SDGCTL);
849 D16(EBIU_SDRRC);
850 D16(EBIU_SDSTAT);
851#endif
852
853#ifdef EBIU_DDRACCT
854 parent = debugfs_create_dir("ebiu_ddr", top);
855 D32(EBIU_DDRACCT);
856 D32(EBIU_DDRARCT);
857 D32(EBIU_DDRBRC0);
858 D32(EBIU_DDRBRC1);
859 D32(EBIU_DDRBRC2);
860 D32(EBIU_DDRBRC3);
861 D32(EBIU_DDRBRC4);
862 D32(EBIU_DDRBRC5);
863 D32(EBIU_DDRBRC6);
864 D32(EBIU_DDRBRC7);
865 D32(EBIU_DDRBWC0);
866 D32(EBIU_DDRBWC1);
867 D32(EBIU_DDRBWC2);
868 D32(EBIU_DDRBWC3);
869 D32(EBIU_DDRBWC4);
870 D32(EBIU_DDRBWC5);
871 D32(EBIU_DDRBWC6);
872 D32(EBIU_DDRBWC7);
873 D32(EBIU_DDRCTL0);
874 D32(EBIU_DDRCTL1);
875 D32(EBIU_DDRCTL2);
876 D32(EBIU_DDRCTL3);
877 D32(EBIU_DDRGC0);
878 D32(EBIU_DDRGC1);
879 D32(EBIU_DDRGC2);
880 D32(EBIU_DDRGC3);
881 D32(EBIU_DDRMCCL);
882 D32(EBIU_DDRMCEN);
883 D32(EBIU_DDRQUE);
884 D32(EBIU_DDRTACT);
885 D32(EBIU_ERRADD);
886 D16(EBIU_ERRMST);
887 D16(EBIU_RSTCTL);
888#endif
889
890#ifdef EMAC_ADDRHI
891 parent = debugfs_create_dir("emac", top);
892 D32(EMAC_ADDRHI);
893 D32(EMAC_ADDRLO);
894 D32(EMAC_FLC);
895 D32(EMAC_HASHHI);
896 D32(EMAC_HASHLO);
897 D32(EMAC_MMC_CTL);
898 D32(EMAC_MMC_RIRQE);
899 D32(EMAC_MMC_RIRQS);
900 D32(EMAC_MMC_TIRQE);
901 D32(EMAC_MMC_TIRQS);
902 D32(EMAC_OPMODE);
903 D32(EMAC_RXC_ALIGN);
904 D32(EMAC_RXC_ALLFRM);
905 D32(EMAC_RXC_ALLOCT);
906 D32(EMAC_RXC_BROAD);
907 D32(EMAC_RXC_DMAOVF);
908 D32(EMAC_RXC_EQ64);
909 D32(EMAC_RXC_FCS);
910 D32(EMAC_RXC_GE1024);
911 D32(EMAC_RXC_LNERRI);
912 D32(EMAC_RXC_LNERRO);
913 D32(EMAC_RXC_LONG);
914 D32(EMAC_RXC_LT1024);
915 D32(EMAC_RXC_LT128);
916 D32(EMAC_RXC_LT256);
917 D32(EMAC_RXC_LT512);
918 D32(EMAC_RXC_MACCTL);
919 D32(EMAC_RXC_MULTI);
920 D32(EMAC_RXC_OCTET);
921 D32(EMAC_RXC_OK);
922 D32(EMAC_RXC_OPCODE);
923 D32(EMAC_RXC_PAUSE);
924 D32(EMAC_RXC_SHORT);
925 D32(EMAC_RXC_TYPED);
926 D32(EMAC_RXC_UNICST);
927 D32(EMAC_RX_IRQE);
928 D32(EMAC_RX_STAT);
929 D32(EMAC_RX_STKY);
930 D32(EMAC_STAADD);
931 D32(EMAC_STADAT);
932 D32(EMAC_SYSCTL);
933 D32(EMAC_SYSTAT);
934 D32(EMAC_TXC_1COL);
935 D32(EMAC_TXC_ABORT);
936 D32(EMAC_TXC_ALLFRM);
937 D32(EMAC_TXC_ALLOCT);
938 D32(EMAC_TXC_BROAD);
939 D32(EMAC_TXC_CRSERR);
940 D32(EMAC_TXC_DEFER);
941 D32(EMAC_TXC_DMAUND);
942 D32(EMAC_TXC_EQ64);
943 D32(EMAC_TXC_GE1024);
944 D32(EMAC_TXC_GT1COL);
945 D32(EMAC_TXC_LATECL);
946 D32(EMAC_TXC_LT1024);
947 D32(EMAC_TXC_LT128);
948 D32(EMAC_TXC_LT256);
949 D32(EMAC_TXC_LT512);
950 D32(EMAC_TXC_MACCTL);
951 D32(EMAC_TXC_MULTI);
952 D32(EMAC_TXC_OCTET);
953 D32(EMAC_TXC_OK);
954 D32(EMAC_TXC_UNICST);
955 D32(EMAC_TXC_XS_COL);
956 D32(EMAC_TXC_XS_DFR);
957 D32(EMAC_TX_IRQE);
958 D32(EMAC_TX_STAT);
959 D32(EMAC_TX_STKY);
960 D32(EMAC_VLAN1);
961 D32(EMAC_VLAN2);
962 D32(EMAC_WKUP_CTL);
963 D32(EMAC_WKUP_FFCMD);
964 D32(EMAC_WKUP_FFCRC0);
965 D32(EMAC_WKUP_FFCRC1);
966 D32(EMAC_WKUP_FFMSK0);
967 D32(EMAC_WKUP_FFMSK1);
968 D32(EMAC_WKUP_FFMSK2);
969 D32(EMAC_WKUP_FFMSK3);
970 D32(EMAC_WKUP_FFOFF);
971# ifdef EMAC_PTP_ACCR
972 D32(EMAC_PTP_ACCR);
973 D32(EMAC_PTP_ADDEND);
974 D32(EMAC_PTP_ALARMHI);
975 D32(EMAC_PTP_ALARMLO);
976 D16(EMAC_PTP_CTL);
977 D32(EMAC_PTP_FOFF);
978 D32(EMAC_PTP_FV1);
979 D32(EMAC_PTP_FV2);
980 D32(EMAC_PTP_FV3);
981 D16(EMAC_PTP_ID_OFF);
982 D32(EMAC_PTP_ID_SNAP);
983 D16(EMAC_PTP_IE);
984 D16(EMAC_PTP_ISTAT);
985 D32(EMAC_PTP_OFFSET);
986 D32(EMAC_PTP_PPS_PERIOD);
987 D32(EMAC_PTP_PPS_STARTHI);
988 D32(EMAC_PTP_PPS_STARTLO);
989 D32(EMAC_PTP_RXSNAPHI);
990 D32(EMAC_PTP_RXSNAPLO);
991 D32(EMAC_PTP_TIMEHI);
992 D32(EMAC_PTP_TIMELO);
993 D32(EMAC_PTP_TXSNAPHI);
994 D32(EMAC_PTP_TXSNAPLO);
995# endif
996#endif
997
998#if defined(EPPI0_STATUS) || defined(EPPI1_STATUS) || defined(EPPI2_STATUS)
999 parent = debugfs_create_dir("eppi", top);
1000# ifdef EPPI0_STATUS
1001 EPPI(0);
1002# endif
1003# ifdef EPPI1_STATUS
1004 EPPI(1);
1005# endif
1006# ifdef EPPI2_STATUS
1007 EPPI(2);
1008# endif
1009#endif
1010
1011 parent = debugfs_create_dir("gptimer", top);
1012#ifdef TIMER_DISABLE
1013 D16(TIMER_DISABLE);
1014 D16(TIMER_ENABLE);
1015 D32(TIMER_STATUS);
1016#endif
1017#ifdef TIMER_DISABLE0
1018 D16(TIMER_DISABLE0);
1019 D16(TIMER_ENABLE0);
1020 D32(TIMER_STATUS0);
1021#endif
1022#ifdef TIMER_DISABLE1
1023 D16(TIMER_DISABLE1);
1024 D16(TIMER_ENABLE1);
1025 D32(TIMER_STATUS1);
1026#endif
1027 /* XXX: Should convert BF561 MMR names */
1028#ifdef TMRS4_DISABLE
1029 D16(TMRS4_DISABLE);
1030 D16(TMRS4_ENABLE);
1031 D32(TMRS4_STATUS);
1032 D16(TMRS8_DISABLE);
1033 D16(TMRS8_ENABLE);
1034 D32(TMRS8_STATUS);
1035#endif
1036 GPTIMER(0);
1037 GPTIMER(1);
1038 GPTIMER(2);
1039#ifdef TIMER3_CONFIG
1040 GPTIMER(3);
1041 GPTIMER(4);
1042 GPTIMER(5);
1043 GPTIMER(6);
1044 GPTIMER(7);
1045#endif
1046#ifdef TIMER8_CONFIG
1047 GPTIMER(8);
1048 GPTIMER(9);
1049 GPTIMER(10);
1050#endif
1051#ifdef TIMER11_CONFIG
1052 GPTIMER(11);
1053#endif
1054
1055#ifdef HMDMA0_CONTROL
1056 parent = debugfs_create_dir("hmdma", top);
1057 HMDMA(0);
1058 HMDMA(1);
1059#endif
1060
1061#ifdef HOST_CONTROL
1062 parent = debugfs_create_dir("hostdp", top);
1063 D16(HOST_CONTROL);
1064 D16(HOST_STATUS);
1065 D16(HOST_TIMEOUT);
1066#endif
1067
1068#ifdef IMDMA_S0_CONFIG
1069 parent = debugfs_create_dir("imdma", top);
1070 IMDMA(0);
1071 IMDMA(1);
1072#endif
1073
1074#ifdef KPAD_CTL
1075 parent = debugfs_create_dir("keypad", top);
1076 D16(KPAD_CTL);
1077 D16(KPAD_PRESCALE);
1078 D16(KPAD_MSEL);
1079 D16(KPAD_ROWCOL);
1080 D16(KPAD_STAT);
1081 D16(KPAD_SOFTEVAL);
1082#endif
1083
1084 parent = debugfs_create_dir("mdma", top);
1085 MDMA(0);
1086 MDMA(1);
1087#ifdef MDMA_D2_CONFIG
1088 MDMA(2);
1089 MDMA(3);
1090#endif
1091
1092#ifdef MXVR_CONFIG
1093 parent = debugfs_create_dir("mxvr", top);
1094 D16(MXVR_CONFIG);
1095# ifdef MXVR_PLL_CTL_0
1096 D32(MXVR_PLL_CTL_0);
1097# endif
1098 D32(MXVR_STATE_0);
1099 D32(MXVR_STATE_1);
1100 D32(MXVR_INT_STAT_0);
1101 D32(MXVR_INT_STAT_1);
1102 D32(MXVR_INT_EN_0);
1103 D32(MXVR_INT_EN_1);
1104 D16(MXVR_POSITION);
1105 D16(MXVR_MAX_POSITION);
1106 D16(MXVR_DELAY);
1107 D16(MXVR_MAX_DELAY);
1108 D32(MXVR_LADDR);
1109 D16(MXVR_GADDR);
1110 D32(MXVR_AADDR);
1111 D32(MXVR_ALLOC_0);
1112 D32(MXVR_ALLOC_1);
1113 D32(MXVR_ALLOC_2);
1114 D32(MXVR_ALLOC_3);
1115 D32(MXVR_ALLOC_4);
1116 D32(MXVR_ALLOC_5);
1117 D32(MXVR_ALLOC_6);
1118 D32(MXVR_ALLOC_7);
1119 D32(MXVR_ALLOC_8);
1120 D32(MXVR_ALLOC_9);
1121 D32(MXVR_ALLOC_10);
1122 D32(MXVR_ALLOC_11);
1123 D32(MXVR_ALLOC_12);
1124 D32(MXVR_ALLOC_13);
1125 D32(MXVR_ALLOC_14);
1126 D32(MXVR_SYNC_LCHAN_0);
1127 D32(MXVR_SYNC_LCHAN_1);
1128 D32(MXVR_SYNC_LCHAN_2);
1129 D32(MXVR_SYNC_LCHAN_3);
1130 D32(MXVR_SYNC_LCHAN_4);
1131 D32(MXVR_SYNC_LCHAN_5);
1132 D32(MXVR_SYNC_LCHAN_6);
1133 D32(MXVR_SYNC_LCHAN_7);
1134 D32(MXVR_DMA0_CONFIG);
1135 D32(MXVR_DMA0_START_ADDR);
1136 D16(MXVR_DMA0_COUNT);
1137 D32(MXVR_DMA0_CURR_ADDR);
1138 D16(MXVR_DMA0_CURR_COUNT);
1139 D32(MXVR_DMA1_CONFIG);
1140 D32(MXVR_DMA1_START_ADDR);
1141 D16(MXVR_DMA1_COUNT);
1142 D32(MXVR_DMA1_CURR_ADDR);
1143 D16(MXVR_DMA1_CURR_COUNT);
1144 D32(MXVR_DMA2_CONFIG);
1145 D32(MXVR_DMA2_START_ADDR);
1146 D16(MXVR_DMA2_COUNT);
1147 D32(MXVR_DMA2_CURR_ADDR);
1148 D16(MXVR_DMA2_CURR_COUNT);
1149 D32(MXVR_DMA3_CONFIG);
1150 D32(MXVR_DMA3_START_ADDR);
1151 D16(MXVR_DMA3_COUNT);
1152 D32(MXVR_DMA3_CURR_ADDR);
1153 D16(MXVR_DMA3_CURR_COUNT);
1154 D32(MXVR_DMA4_CONFIG);
1155 D32(MXVR_DMA4_START_ADDR);
1156 D16(MXVR_DMA4_COUNT);
1157 D32(MXVR_DMA4_CURR_ADDR);
1158 D16(MXVR_DMA4_CURR_COUNT);
1159 D32(MXVR_DMA5_CONFIG);
1160 D32(MXVR_DMA5_START_ADDR);
1161 D16(MXVR_DMA5_COUNT);
1162 D32(MXVR_DMA5_CURR_ADDR);
1163 D16(MXVR_DMA5_CURR_COUNT);
1164 D32(MXVR_DMA6_CONFIG);
1165 D32(MXVR_DMA6_START_ADDR);
1166 D16(MXVR_DMA6_COUNT);
1167 D32(MXVR_DMA6_CURR_ADDR);
1168 D16(MXVR_DMA6_CURR_COUNT);
1169 D32(MXVR_DMA7_CONFIG);
1170 D32(MXVR_DMA7_START_ADDR);
1171 D16(MXVR_DMA7_COUNT);
1172 D32(MXVR_DMA7_CURR_ADDR);
1173 D16(MXVR_DMA7_CURR_COUNT);
1174 D16(MXVR_AP_CTL);
1175 D32(MXVR_APRB_START_ADDR);
1176 D32(MXVR_APRB_CURR_ADDR);
1177 D32(MXVR_APTB_START_ADDR);
1178 D32(MXVR_APTB_CURR_ADDR);
1179 D32(MXVR_CM_CTL);
1180 D32(MXVR_CMRB_START_ADDR);
1181 D32(MXVR_CMRB_CURR_ADDR);
1182 D32(MXVR_CMTB_START_ADDR);
1183 D32(MXVR_CMTB_CURR_ADDR);
1184 D32(MXVR_RRDB_START_ADDR);
1185 D32(MXVR_RRDB_CURR_ADDR);
1186 D32(MXVR_PAT_DATA_0);
1187 D32(MXVR_PAT_EN_0);
1188 D32(MXVR_PAT_DATA_1);
1189 D32(MXVR_PAT_EN_1);
1190 D16(MXVR_FRAME_CNT_0);
1191 D16(MXVR_FRAME_CNT_1);
1192 D32(MXVR_ROUTING_0);
1193 D32(MXVR_ROUTING_1);
1194 D32(MXVR_ROUTING_2);
1195 D32(MXVR_ROUTING_3);
1196 D32(MXVR_ROUTING_4);
1197 D32(MXVR_ROUTING_5);
1198 D32(MXVR_ROUTING_6);
1199 D32(MXVR_ROUTING_7);
1200 D32(MXVR_ROUTING_8);
1201 D32(MXVR_ROUTING_9);
1202 D32(MXVR_ROUTING_10);
1203 D32(MXVR_ROUTING_11);
1204 D32(MXVR_ROUTING_12);
1205 D32(MXVR_ROUTING_13);
1206 D32(MXVR_ROUTING_14);
1207# ifdef MXVR_PLL_CTL_1
1208 D32(MXVR_PLL_CTL_1);
1209# endif
1210 D16(MXVR_BLOCK_CNT);
1211# ifdef MXVR_CLK_CTL
1212 D32(MXVR_CLK_CTL);
1213# endif
1214# ifdef MXVR_CDRPLL_CTL
1215 D32(MXVR_CDRPLL_CTL);
1216# endif
1217# ifdef MXVR_FMPLL_CTL
1218 D32(MXVR_FMPLL_CTL);
1219# endif
1220# ifdef MXVR_PIN_CTL
1221 D16(MXVR_PIN_CTL);
1222# endif
1223# ifdef MXVR_SCLK_CNT
1224 D16(MXVR_SCLK_CNT);
1225# endif
1226#endif
1227
1228#ifdef NFC_ADDR
1229 parent = debugfs_create_dir("nfc", top);
1230 D_WO(NFC_ADDR, 16);
1231 D_WO(NFC_CMD, 16);
1232 D_RO(NFC_COUNT, 16);
1233 D16(NFC_CTL);
1234 D_WO(NFC_DATA_RD, 16);
1235 D_WO(NFC_DATA_WR, 16);
1236 D_RO(NFC_ECC0, 16);
1237 D_RO(NFC_ECC1, 16);
1238 D_RO(NFC_ECC2, 16);
1239 D_RO(NFC_ECC3, 16);
1240 D16(NFC_IRQMASK);
1241 D16(NFC_IRQSTAT);
1242 D_WO(NFC_PGCTL, 16);
1243 D_RO(NFC_READ, 16);
1244 D16(NFC_RST);
1245 D_RO(NFC_STAT, 16);
1246#endif
1247
1248#ifdef OTP_CONTROL
1249 parent = debugfs_create_dir("otp", top);
1250 D16(OTP_CONTROL);
1251 D16(OTP_BEN);
1252 D16(OTP_STATUS);
1253 D32(OTP_TIMING);
1254 D32(OTP_DATA0);
1255 D32(OTP_DATA1);
1256 D32(OTP_DATA2);
1257 D32(OTP_DATA3);
1258#endif
1259
1260#ifdef PIXC_CTL
1261 parent = debugfs_create_dir("pixc", top);
1262 D16(PIXC_CTL);
1263 D16(PIXC_PPL);
1264 D16(PIXC_LPF);
1265 D16(PIXC_AHSTART);
1266 D16(PIXC_AHEND);
1267 D16(PIXC_AVSTART);
1268 D16(PIXC_AVEND);
1269 D16(PIXC_ATRANSP);
1270 D16(PIXC_BHSTART);
1271 D16(PIXC_BHEND);
1272 D16(PIXC_BVSTART);
1273 D16(PIXC_BVEND);
1274 D16(PIXC_BTRANSP);
1275 D16(PIXC_INTRSTAT);
1276 D32(PIXC_RYCON);
1277 D32(PIXC_GUCON);
1278 D32(PIXC_BVCON);
1279 D32(PIXC_CCBIAS);
1280 D32(PIXC_TC);
1281#endif
1282
1283 parent = debugfs_create_dir("pll", top);
1284 D16(PLL_CTL);
1285 D16(PLL_DIV);
1286 D16(PLL_LOCKCNT);
1287 D16(PLL_STAT);
1288 D16(VR_CTL);
1289 D32(CHIPID); /* it's part of this hardware block */
1290
1291#if defined(PPI_STATUS) || defined(PPI0_STATUS) || defined(PPI1_STATUS)
1292 parent = debugfs_create_dir("ppi", top);
1293# ifdef PPI_STATUS
1294 bfin_debug_mmrs_ppi(parent, PPI_STATUS, -1);
1295# endif
1296# ifdef PPI0_STATUS
1297 PPI(0);
1298# endif
1299# ifdef PPI1_STATUS
1300 PPI(1);
1301# endif
1302#endif
1303
1304#ifdef PWM_CTRL
1305 parent = debugfs_create_dir("pwm", top);
1306 D16(PWM_CTRL);
1307 D16(PWM_STAT);
1308 D16(PWM_TM);
1309 D16(PWM_DT);
1310 D16(PWM_GATE);
1311 D16(PWM_CHA);
1312 D16(PWM_CHB);
1313 D16(PWM_CHC);
1314 D16(PWM_SEG);
1315 D16(PWM_SYNCWT);
1316 D16(PWM_CHAL);
1317 D16(PWM_CHBL);
1318 D16(PWM_CHCL);
1319 D16(PWM_LSI);
1320 D16(PWM_STAT2);
1321#endif
1322
1323#ifdef RSI_CONFIG
1324 parent = debugfs_create_dir("rsi", top);
1325 D32(RSI_ARGUMENT);
1326 D16(RSI_CEATA_CONTROL);
1327 D16(RSI_CLK_CONTROL);
1328 D16(RSI_COMMAND);
1329 D16(RSI_CONFIG);
1330 D16(RSI_DATA_CNT);
1331 D16(RSI_DATA_CONTROL);
1332 D16(RSI_DATA_LGTH);
1333 D32(RSI_DATA_TIMER);
1334 D16(RSI_EMASK);
1335 D16(RSI_ESTAT);
1336 D32(RSI_FIFO);
1337 D16(RSI_FIFO_CNT);
1338 D32(RSI_MASK0);
1339 D32(RSI_MASK1);
1340 D16(RSI_PID0);
1341 D16(RSI_PID1);
1342 D16(RSI_PID2);
1343 D16(RSI_PID3);
1344 D16(RSI_PWR_CONTROL);
1345 D16(RSI_RD_WAIT_EN);
1346 D32(RSI_RESPONSE0);
1347 D32(RSI_RESPONSE1);
1348 D32(RSI_RESPONSE2);
1349 D32(RSI_RESPONSE3);
1350 D16(RSI_RESP_CMD);
1351 D32(RSI_STATUS);
1352 D_WO(RSI_STATUSCL, 16);
1353#endif
1354
1355#ifdef RTC_ALARM
1356 parent = debugfs_create_dir("rtc", top);
1357 D32(RTC_ALARM);
1358 D16(RTC_ICTL);
1359 D16(RTC_ISTAT);
1360 D16(RTC_PREN);
1361 D32(RTC_STAT);
1362 D16(RTC_SWCNT);
1363#endif
1364
1365#ifdef SDH_CFG
1366 parent = debugfs_create_dir("sdh", top);
1367 D32(SDH_ARGUMENT);
1368 D16(SDH_CFG);
1369 D16(SDH_CLK_CTL);
1370 D16(SDH_COMMAND);
1371 D_RO(SDH_DATA_CNT, 16);
1372 D16(SDH_DATA_CTL);
1373 D16(SDH_DATA_LGTH);
1374 D32(SDH_DATA_TIMER);
1375 D16(SDH_E_MASK);
1376 D16(SDH_E_STATUS);
1377 D32(SDH_FIFO);
1378 D_RO(SDH_FIFO_CNT, 16);
1379 D32(SDH_MASK0);
1380 D32(SDH_MASK1);
1381 D_RO(SDH_PID0, 16);
1382 D_RO(SDH_PID1, 16);
1383 D_RO(SDH_PID2, 16);
1384 D_RO(SDH_PID3, 16);
1385 D_RO(SDH_PID4, 16);
1386 D_RO(SDH_PID5, 16);
1387 D_RO(SDH_PID6, 16);
1388 D_RO(SDH_PID7, 16);
1389 D16(SDH_PWR_CTL);
1390 D16(SDH_RD_WAIT_EN);
1391 D_RO(SDH_RESPONSE0, 32);
1392 D_RO(SDH_RESPONSE1, 32);
1393 D_RO(SDH_RESPONSE2, 32);
1394 D_RO(SDH_RESPONSE3, 32);
1395 D_RO(SDH_RESP_CMD, 16);
1396 D_RO(SDH_STATUS, 32);
1397 D_WO(SDH_STATUS_CLR, 16);
1398#endif
1399
1400#ifdef SECURE_CONTROL
1401 parent = debugfs_create_dir("security", top);
1402 D16(SECURE_CONTROL);
1403 D16(SECURE_STATUS);
1404 D32(SECURE_SYSSWT);
1405#endif
1406
1407 parent = debugfs_create_dir("sic", top);
1408 D16(SWRST);
1409 D16(SYSCR);
1410 D16(SIC_RVECT);
1411 D32(SIC_IAR0);
1412 D32(SIC_IAR1);
1413 D32(SIC_IAR2);
1414#ifdef SIC_IAR3
1415 D32(SIC_IAR3);
1416#endif
1417#ifdef SIC_IAR4
1418 D32(SIC_IAR4);
1419 D32(SIC_IAR5);
1420 D32(SIC_IAR6);
1421#endif
1422#ifdef SIC_IAR7
1423 D32(SIC_IAR7);
1424#endif
1425#ifdef SIC_IAR8
1426 D32(SIC_IAR8);
1427 D32(SIC_IAR9);
1428 D32(SIC_IAR10);
1429 D32(SIC_IAR11);
1430#endif
1431#ifdef SIC_IMASK
1432 D32(SIC_IMASK);
1433 D32(SIC_ISR);
1434 D32(SIC_IWR);
1435#endif
1436#ifdef SIC_IMASK0
1437 D32(SIC_IMASK0);
1438 D32(SIC_IMASK1);
1439 D32(SIC_ISR0);
1440 D32(SIC_ISR1);
1441 D32(SIC_IWR0);
1442 D32(SIC_IWR1);
1443#endif
1444#ifdef SIC_IMASK2
1445 D32(SIC_IMASK2);
1446 D32(SIC_ISR2);
1447 D32(SIC_IWR2);
1448#endif
1449#ifdef SICB_RVECT
1450 D16(SICB_SWRST);
1451 D16(SICB_SYSCR);
1452 D16(SICB_RVECT);
1453 D32(SICB_IAR0);
1454 D32(SICB_IAR1);
1455 D32(SICB_IAR2);
1456 D32(SICB_IAR3);
1457 D32(SICB_IAR4);
1458 D32(SICB_IAR5);
1459 D32(SICB_IAR6);
1460 D32(SICB_IAR7);
1461 D32(SICB_IMASK0);
1462 D32(SICB_IMASK1);
1463 D32(SICB_ISR0);
1464 D32(SICB_ISR1);
1465 D32(SICB_IWR0);
1466 D32(SICB_IWR1);
1467#endif
1468
1469 parent = debugfs_create_dir("spi", top);
1470#ifdef SPI0_REGBASE
1471 SPI(0);
1472#endif
1473#ifdef SPI1_REGBASE
1474 SPI(1);
1475#endif
1476#ifdef SPI2_REGBASE
1477 SPI(2);
1478#endif
1479
1480 parent = debugfs_create_dir("sport", top);
1481#ifdef SPORT0_STAT
1482 SPORT(0);
1483#endif
1484#ifdef SPORT1_STAT
1485 SPORT(1);
1486#endif
1487#ifdef SPORT2_STAT
1488 SPORT(2);
1489#endif
1490#ifdef SPORT3_STAT
1491 SPORT(3);
1492#endif
1493
1494#if defined(TWI_CLKDIV) || defined(TWI0_CLKDIV) || defined(TWI1_CLKDIV)
1495 parent = debugfs_create_dir("twi", top);
1496# ifdef TWI_CLKDIV
1497 bfin_debug_mmrs_twi(parent, TWI_CLKDIV, -1);
1498# endif
1499# ifdef TWI0_CLKDIV
1500 TWI(0);
1501# endif
1502# ifdef TWI1_CLKDIV
1503 TWI(1);
1504# endif
1505#endif
1506
1507 parent = debugfs_create_dir("uart", top);
1508#ifdef BFIN_UART_DLL
1509 bfin_debug_mmrs_uart(parent, BFIN_UART_DLL, -1);
1510#endif
1511#ifdef UART0_DLL
1512 UART(0);
1513#endif
1514#ifdef UART1_DLL
1515 UART(1);
1516#endif
1517#ifdef UART2_DLL
1518 UART(2);
1519#endif
1520#ifdef UART3_DLL
1521 UART(3);
1522#endif
1523
1524#ifdef USB_FADDR
1525 parent = debugfs_create_dir("usb", top);
1526 D16(USB_FADDR);
1527 D16(USB_POWER);
1528 D16(USB_INTRTX);
1529 D16(USB_INTRRX);
1530 D16(USB_INTRTXE);
1531 D16(USB_INTRRXE);
1532 D16(USB_INTRUSB);
1533 D16(USB_INTRUSBE);
1534 D16(USB_FRAME);
1535 D16(USB_INDEX);
1536 D16(USB_TESTMODE);
1537 D16(USB_GLOBINTR);
1538 D16(USB_GLOBAL_CTL);
1539 D16(USB_TX_MAX_PACKET);
1540 D16(USB_CSR0);
1541 D16(USB_TXCSR);
1542 D16(USB_RX_MAX_PACKET);
1543 D16(USB_RXCSR);
1544 D16(USB_COUNT0);
1545 D16(USB_RXCOUNT);
1546 D16(USB_TXTYPE);
1547 D16(USB_NAKLIMIT0);
1548 D16(USB_TXINTERVAL);
1549 D16(USB_RXTYPE);
1550 D16(USB_RXINTERVAL);
1551 D16(USB_TXCOUNT);
1552 D16(USB_EP0_FIFO);
1553 D16(USB_EP1_FIFO);
1554 D16(USB_EP2_FIFO);
1555 D16(USB_EP3_FIFO);
1556 D16(USB_EP4_FIFO);
1557 D16(USB_EP5_FIFO);
1558 D16(USB_EP6_FIFO);
1559 D16(USB_EP7_FIFO);
1560 D16(USB_OTG_DEV_CTL);
1561 D16(USB_OTG_VBUS_IRQ);
1562 D16(USB_OTG_VBUS_MASK);
1563 D16(USB_LINKINFO);
1564 D16(USB_VPLEN);
1565 D16(USB_HS_EOF1);
1566 D16(USB_FS_EOF1);
1567 D16(USB_LS_EOF1);
1568 D16(USB_APHY_CNTRL);
1569 D16(USB_APHY_CALIB);
1570 D16(USB_APHY_CNTRL2);
1571 D16(USB_PHY_TEST);
1572 D16(USB_PLLOSC_CTRL);
1573 D16(USB_SRP_CLKDIV);
1574 D16(USB_EP_NI0_TXMAXP);
1575 D16(USB_EP_NI0_TXCSR);
1576 D16(USB_EP_NI0_RXMAXP);
1577 D16(USB_EP_NI0_RXCSR);
1578 D16(USB_EP_NI0_RXCOUNT);
1579 D16(USB_EP_NI0_TXTYPE);
1580 D16(USB_EP_NI0_TXINTERVAL);
1581 D16(USB_EP_NI0_RXTYPE);
1582 D16(USB_EP_NI0_RXINTERVAL);
1583 D16(USB_EP_NI0_TXCOUNT);
1584 D16(USB_EP_NI1_TXMAXP);
1585 D16(USB_EP_NI1_TXCSR);
1586 D16(USB_EP_NI1_RXMAXP);
1587 D16(USB_EP_NI1_RXCSR);
1588 D16(USB_EP_NI1_RXCOUNT);
1589 D16(USB_EP_NI1_TXTYPE);
1590 D16(USB_EP_NI1_TXINTERVAL);
1591 D16(USB_EP_NI1_RXTYPE);
1592 D16(USB_EP_NI1_RXINTERVAL);
1593 D16(USB_EP_NI1_TXCOUNT);
1594 D16(USB_EP_NI2_TXMAXP);
1595 D16(USB_EP_NI2_TXCSR);
1596 D16(USB_EP_NI2_RXMAXP);
1597 D16(USB_EP_NI2_RXCSR);
1598 D16(USB_EP_NI2_RXCOUNT);
1599 D16(USB_EP_NI2_TXTYPE);
1600 D16(USB_EP_NI2_TXINTERVAL);
1601 D16(USB_EP_NI2_RXTYPE);
1602 D16(USB_EP_NI2_RXINTERVAL);
1603 D16(USB_EP_NI2_TXCOUNT);
1604 D16(USB_EP_NI3_TXMAXP);
1605 D16(USB_EP_NI3_TXCSR);
1606 D16(USB_EP_NI3_RXMAXP);
1607 D16(USB_EP_NI3_RXCSR);
1608 D16(USB_EP_NI3_RXCOUNT);
1609 D16(USB_EP_NI3_TXTYPE);
1610 D16(USB_EP_NI3_TXINTERVAL);
1611 D16(USB_EP_NI3_RXTYPE);
1612 D16(USB_EP_NI3_RXINTERVAL);
1613 D16(USB_EP_NI3_TXCOUNT);
1614 D16(USB_EP_NI4_TXMAXP);
1615 D16(USB_EP_NI4_TXCSR);
1616 D16(USB_EP_NI4_RXMAXP);
1617 D16(USB_EP_NI4_RXCSR);
1618 D16(USB_EP_NI4_RXCOUNT);
1619 D16(USB_EP_NI4_TXTYPE);
1620 D16(USB_EP_NI4_TXINTERVAL);
1621 D16(USB_EP_NI4_RXTYPE);
1622 D16(USB_EP_NI4_RXINTERVAL);
1623 D16(USB_EP_NI4_TXCOUNT);
1624 D16(USB_EP_NI5_TXMAXP);
1625 D16(USB_EP_NI5_TXCSR);
1626 D16(USB_EP_NI5_RXMAXP);
1627 D16(USB_EP_NI5_RXCSR);
1628 D16(USB_EP_NI5_RXCOUNT);
1629 D16(USB_EP_NI5_TXTYPE);
1630 D16(USB_EP_NI5_TXINTERVAL);
1631 D16(USB_EP_NI5_RXTYPE);
1632 D16(USB_EP_NI5_RXINTERVAL);
1633 D16(USB_EP_NI5_TXCOUNT);
1634 D16(USB_EP_NI6_TXMAXP);
1635 D16(USB_EP_NI6_TXCSR);
1636 D16(USB_EP_NI6_RXMAXP);
1637 D16(USB_EP_NI6_RXCSR);
1638 D16(USB_EP_NI6_RXCOUNT);
1639 D16(USB_EP_NI6_TXTYPE);
1640 D16(USB_EP_NI6_TXINTERVAL);
1641 D16(USB_EP_NI6_RXTYPE);
1642 D16(USB_EP_NI6_RXINTERVAL);
1643 D16(USB_EP_NI6_TXCOUNT);
1644 D16(USB_EP_NI7_TXMAXP);
1645 D16(USB_EP_NI7_TXCSR);
1646 D16(USB_EP_NI7_RXMAXP);
1647 D16(USB_EP_NI7_RXCSR);
1648 D16(USB_EP_NI7_RXCOUNT);
1649 D16(USB_EP_NI7_TXTYPE);
1650 D16(USB_EP_NI7_TXINTERVAL);
1651 D16(USB_EP_NI7_RXTYPE);
1652 D16(USB_EP_NI7_RXINTERVAL);
1653 D16(USB_EP_NI7_TXCOUNT);
1654 D16(USB_DMA_INTERRUPT);
1655 D16(USB_DMA0CONTROL);
1656 D16(USB_DMA0ADDRLOW);
1657 D16(USB_DMA0ADDRHIGH);
1658 D16(USB_DMA0COUNTLOW);
1659 D16(USB_DMA0COUNTHIGH);
1660 D16(USB_DMA1CONTROL);
1661 D16(USB_DMA1ADDRLOW);
1662 D16(USB_DMA1ADDRHIGH);
1663 D16(USB_DMA1COUNTLOW);
1664 D16(USB_DMA1COUNTHIGH);
1665 D16(USB_DMA2CONTROL);
1666 D16(USB_DMA2ADDRLOW);
1667 D16(USB_DMA2ADDRHIGH);
1668 D16(USB_DMA2COUNTLOW);
1669 D16(USB_DMA2COUNTHIGH);
1670 D16(USB_DMA3CONTROL);
1671 D16(USB_DMA3ADDRLOW);
1672 D16(USB_DMA3ADDRHIGH);
1673 D16(USB_DMA3COUNTLOW);
1674 D16(USB_DMA3COUNTHIGH);
1675 D16(USB_DMA4CONTROL);
1676 D16(USB_DMA4ADDRLOW);
1677 D16(USB_DMA4ADDRHIGH);
1678 D16(USB_DMA4COUNTLOW);
1679 D16(USB_DMA4COUNTHIGH);
1680 D16(USB_DMA5CONTROL);
1681 D16(USB_DMA5ADDRLOW);
1682 D16(USB_DMA5ADDRHIGH);
1683 D16(USB_DMA5COUNTLOW);
1684 D16(USB_DMA5COUNTHIGH);
1685 D16(USB_DMA6CONTROL);
1686 D16(USB_DMA6ADDRLOW);
1687 D16(USB_DMA6ADDRHIGH);
1688 D16(USB_DMA6COUNTLOW);
1689 D16(USB_DMA6COUNTHIGH);
1690 D16(USB_DMA7CONTROL);
1691 D16(USB_DMA7ADDRLOW);
1692 D16(USB_DMA7ADDRHIGH);
1693 D16(USB_DMA7COUNTLOW);
1694 D16(USB_DMA7COUNTHIGH);
1695#endif
1696
1697#ifdef WDOG_CNT
1698 parent = debugfs_create_dir("watchdog", top);
1699 D32(WDOG_CNT);
1700 D16(WDOG_CTL);
1701 D32(WDOG_STAT);
1702#endif
1703#ifdef WDOGA_CNT
1704 parent = debugfs_create_dir("watchdog", top);
1705 D32(WDOGA_CNT);
1706 D16(WDOGA_CTL);
1707 D32(WDOGA_STAT);
1708 D32(WDOGB_CNT);
1709 D16(WDOGB_CTL);
1710 D32(WDOGB_STAT);
1711#endif
1712
1713 /* BF533 glue */
1714#ifdef FIO_FLAG_D
1715#define PORTFIO FIO_FLAG_D
1716#endif
1717 /* BF561 glue */
1718#ifdef FIO0_FLAG_D
1719#define PORTFIO FIO0_FLAG_D
1720#endif
1721#ifdef FIO1_FLAG_D
1722#define PORTGIO FIO1_FLAG_D
1723#endif
1724#ifdef FIO2_FLAG_D
1725#define PORTHIO FIO2_FLAG_D
1726#endif
1727 parent = debugfs_create_dir("port", top);
1728#ifdef PORTFIO
1729 PORT(PORTFIO, 'F');
1730#endif
1731#ifdef PORTGIO
1732 PORT(PORTGIO, 'G');
1733#endif
1734#ifdef PORTHIO
1735 PORT(PORTHIO, 'H');
1736#endif
1737
1738#ifdef __ADSPBF51x__
1739 D16(PORTF_FER);
1740 D16(PORTF_DRIVE);
1741 D16(PORTF_HYSTERESIS);
1742 D16(PORTF_MUX);
1743
1744 D16(PORTG_FER);
1745 D16(PORTG_DRIVE);
1746 D16(PORTG_HYSTERESIS);
1747 D16(PORTG_MUX);
1748
1749 D16(PORTH_FER);
1750 D16(PORTH_DRIVE);
1751 D16(PORTH_HYSTERESIS);
1752 D16(PORTH_MUX);
1753
1754 D16(MISCPORT_DRIVE);
1755 D16(MISCPORT_HYSTERESIS);
1756#endif /* BF51x */
1757
1758#ifdef __ADSPBF52x__
1759 D16(PORTF_FER);
1760 D16(PORTF_DRIVE);
1761 D16(PORTF_HYSTERESIS);
1762 D16(PORTF_MUX);
1763 D16(PORTF_SLEW);
1764
1765 D16(PORTG_FER);
1766 D16(PORTG_DRIVE);
1767 D16(PORTG_HYSTERESIS);
1768 D16(PORTG_MUX);
1769 D16(PORTG_SLEW);
1770
1771 D16(PORTH_FER);
1772 D16(PORTH_DRIVE);
1773 D16(PORTH_HYSTERESIS);
1774 D16(PORTH_MUX);
1775 D16(PORTH_SLEW);
1776
1777 D16(MISCPORT_DRIVE);
1778 D16(MISCPORT_HYSTERESIS);
1779 D16(MISCPORT_SLEW);
1780#endif /* BF52x */
1781
1782#ifdef BF537_FAMILY
1783 D16(PORTF_FER);
1784 D16(PORTG_FER);
1785 D16(PORTH_FER);
1786 D16(PORT_MUX);
1787#endif /* BF534 BF536 BF537 */
1788
1789#ifdef BF538_FAMILY
1790 D16(PORTCIO_FER);
1791 D16(PORTCIO);
1792 D16(PORTCIO_CLEAR);
1793 D16(PORTCIO_SET);
1794 D16(PORTCIO_TOGGLE);
1795 D16(PORTCIO_DIR);
1796 D16(PORTCIO_INEN);
1797
1798 D16(PORTDIO);
1799 D16(PORTDIO_CLEAR);
1800 D16(PORTDIO_DIR);
1801 D16(PORTDIO_FER);
1802 D16(PORTDIO_INEN);
1803 D16(PORTDIO_SET);
1804 D16(PORTDIO_TOGGLE);
1805
1806 D16(PORTEIO);
1807 D16(PORTEIO_CLEAR);
1808 D16(PORTEIO_DIR);
1809 D16(PORTEIO_FER);
1810 D16(PORTEIO_INEN);
1811 D16(PORTEIO_SET);
1812 D16(PORTEIO_TOGGLE);
1813#endif /* BF538 BF539 */
1814
1815#ifdef __ADSPBF54x__
1816 {
1817 int num;
1818 unsigned long base;
1819 char *_buf, buf[32];
1820
1821 base = PORTA_FER;
1822 for (num = 0; num < 10; ++num) {
1823 PORT(base, num);
1824 base += sizeof(struct bfin_gpio_regs);
1825 }
1826
1827#define __PINT(uname, lname) __REGS(pint, #uname, lname)
1828 parent = debugfs_create_dir("pint", top);
1829 base = PINT0_MASK_SET;
1830 for (num = 0; num < 4; ++num) {
1831 _buf = REGS_STR_PFX(buf, PINT, num);
1832 __PINT(MASK_SET, mask_set);
1833 __PINT(MASK_CLEAR, mask_clear);
1834 __PINT(IRQ, irq);
1835 __PINT(ASSIGN, assign);
1836 __PINT(EDGE_SET, edge_set);
1837 __PINT(EDGE_CLEAR, edge_clear);
1838 __PINT(INVERT_SET, invert_set);
1839 __PINT(INVERT_CLEAR, invert_clear);
1840 __PINT(PINSTATE, pinstate);
1841 __PINT(LATCH, latch);
1842 base += sizeof(struct bfin_pint_regs);
1843 }
1844
1845 }
1846#endif /* BF54x */
1847
1848 debug_mmrs_dentry = top;
1849
1850 return 0;
1851}
1852module_init(bfin_debug_mmrs_init);
1853
/*
 * Module unload hook: tear down the whole debugfs tree that was built at
 * init time.  debug_mmrs_dentry is the root dir saved by the init routine
 * (see "debug_mmrs_dentry = top;" above), so one recursive remove drops
 * every per-peripheral subdirectory and register file in a single call.
 */
 1854static void __exit bfin_debug_mmrs_exit(void)
 1855{
 1856	debugfs_remove_recursive(debug_mmrs_dentry);
 1857}
 1858module_exit(bfin_debug_mmrs_exit);
1859
1860MODULE_LICENSE("GPL");
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index f37019c847c9..486426f8a0d7 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -33,6 +33,7 @@
33#include <linux/io.h> 33#include <linux/io.h>
34#include <asm/system.h> 34#include <asm/system.h>
35#include <asm/atomic.h> 35#include <asm/atomic.h>
36#include <asm/irq_handler.h>
36 37
37DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs); 38DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
38 39
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 1696d34f51c2..ff3d747154ac 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -11,6 +11,7 @@
11#include <linux/kallsyms.h> 11#include <linux/kallsyms.h>
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/irq.h> 13#include <linux/irq.h>
14#include <asm/irq_handler.h>
14#include <asm/trace.h> 15#include <asm/trace.h>
15#include <asm/pda.h> 16#include <asm/pda.h>
16 17
diff --git a/arch/blackfin/kernel/nmi.c b/arch/blackfin/kernel/nmi.c
index 401eb1d8e3b4..679d0db35256 100644
--- a/arch/blackfin/kernel/nmi.c
+++ b/arch/blackfin/kernel/nmi.c
@@ -145,16 +145,16 @@ int check_nmi_wdt_touched(void)
145{ 145{
146 unsigned int this_cpu = smp_processor_id(); 146 unsigned int this_cpu = smp_processor_id();
147 unsigned int cpu; 147 unsigned int cpu;
148 cpumask_t mask;
148 149
149 cpumask_t mask = cpu_online_map; 150 cpumask_copy(&mask, cpu_online_mask);
150
151 if (!atomic_read(&nmi_touched[this_cpu])) 151 if (!atomic_read(&nmi_touched[this_cpu]))
152 return 0; 152 return 0;
153 153
154 atomic_set(&nmi_touched[this_cpu], 0); 154 atomic_set(&nmi_touched[this_cpu], 0);
155 155
156 cpu_clear(this_cpu, mask); 156 cpumask_clear_cpu(this_cpu, &mask);
157 for_each_cpu_mask(cpu, mask) { 157 for_each_cpu(cpu, &mask) {
158 invalidate_dcache_range((unsigned long)(&nmi_touched[cpu]), 158 invalidate_dcache_range((unsigned long)(&nmi_touched[cpu]),
159 (unsigned long)(&nmi_touched[cpu])); 159 (unsigned long)(&nmi_touched[cpu]));
160 if (!atomic_read(&nmi_touched[cpu])) 160 if (!atomic_read(&nmi_touched[cpu]))
diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c
new file mode 100644
index 000000000000..04300f29c0e7
--- /dev/null
+++ b/arch/blackfin/kernel/perf_event.c
@@ -0,0 +1,498 @@
1/*
2 * Blackfin performance counters
3 *
4 * Copyright 2011 Analog Devices Inc.
5 *
6 * Ripped from SuperH version:
7 *
8 * Copyright (C) 2009 Paul Mundt
9 *
10 * Heavily based on the x86 and PowerPC implementations.
11 *
12 * x86:
13 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
14 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
15 * Copyright (C) 2009 Jaswinder Singh Rajput
16 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
17 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
18 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
19 *
20 * ppc:
21 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
22 *
23 * Licensed under the GPL-2 or later.
24 */
25
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/perf_event.h>
29#include <asm/bfin_pfmon.h>
30
31/*
32 * We have two counters, and each counter can support an event type.
33 * The 'o' is PFCNTx=1 and 's' is PFCNTx=0
34 *
35 * 0x04 o pc invariant branches
36 * 0x06 o mispredicted branches
37 * 0x09 o predicted branches taken
38 * 0x0B o EXCPT insn
39 * 0x0C o CSYNC/SSYNC insn
40 * 0x0D o Insns committed
41 * 0x0E o Interrupts taken
42 * 0x0F o Misaligned address exceptions
43 * 0x80 o Code memory fetches stalled due to DMA
44 * 0x83 o 64bit insn fetches delivered
45 * 0x9A o data cache fills (bank a)
46 * 0x9B o data cache fills (bank b)
47 * 0x9C o data cache lines evicted (bank a)
48 * 0x9D o data cache lines evicted (bank b)
49 * 0x9E o data cache high priority fills
50 * 0x9F o data cache low priority fills
51 * 0x00 s loop 0 iterations
52 * 0x01 s loop 1 iterations
53 * 0x0A s CSYNC/SSYNC stalls
54 * 0x10 s DAG read/after write hazards
55 * 0x13 s RAW data hazards
56 * 0x81 s code TAG stalls
57 * 0x82 s code fill stalls
58 * 0x90 s processor to memory stalls
59 * 0x91 s data memory stalls not hidden by 0x90
60 * 0x92 s data store buffer full stalls
61 * 0x93 s data memory write buffer full stalls due to high->low priority
62 * 0x95 s data memory fill buffer stalls
63 * 0x96 s data TAG collision stalls
64 * 0x97 s data collision stalls
65 * 0x98 s data stalls
66 * 0x99 s data stalls sent to processor
67 */
68
69static const int event_map[] = {
70 /* use CYCLES cpu register */
71 [PERF_COUNT_HW_CPU_CYCLES] = -1,
72 [PERF_COUNT_HW_INSTRUCTIONS] = 0x0D,
73 [PERF_COUNT_HW_CACHE_REFERENCES] = -1,
74 [PERF_COUNT_HW_CACHE_MISSES] = 0x83,
75 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x09,
76 [PERF_COUNT_HW_BRANCH_MISSES] = 0x06,
77 [PERF_COUNT_HW_BUS_CYCLES] = -1,
78};
79
80#define C(x) PERF_COUNT_HW_CACHE_##x
81
82static const int cache_events[PERF_COUNT_HW_CACHE_MAX]
83 [PERF_COUNT_HW_CACHE_OP_MAX]
84 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
85{
86 [C(L1D)] = { /* Data bank A */
87 [C(OP_READ)] = {
88 [C(RESULT_ACCESS)] = 0,
89 [C(RESULT_MISS) ] = 0x9A,
90 },
91 [C(OP_WRITE)] = {
92 [C(RESULT_ACCESS)] = 0,
93 [C(RESULT_MISS) ] = 0,
94 },
95 [C(OP_PREFETCH)] = {
96 [C(RESULT_ACCESS)] = 0,
97 [C(RESULT_MISS) ] = 0,
98 },
99 },
100
101 [C(L1I)] = {
102 [C(OP_READ)] = {
103 [C(RESULT_ACCESS)] = 0,
104 [C(RESULT_MISS) ] = 0x83,
105 },
106 [C(OP_WRITE)] = {
107 [C(RESULT_ACCESS)] = -1,
108 [C(RESULT_MISS) ] = -1,
109 },
110 [C(OP_PREFETCH)] = {
111 [C(RESULT_ACCESS)] = 0,
112 [C(RESULT_MISS) ] = 0,
113 },
114 },
115
116 [C(LL)] = {
117 [C(OP_READ)] = {
118 [C(RESULT_ACCESS)] = -1,
119 [C(RESULT_MISS) ] = -1,
120 },
121 [C(OP_WRITE)] = {
122 [C(RESULT_ACCESS)] = -1,
123 [C(RESULT_MISS) ] = -1,
124 },
125 [C(OP_PREFETCH)] = {
126 [C(RESULT_ACCESS)] = -1,
127 [C(RESULT_MISS) ] = -1,
128 },
129 },
130
131 [C(DTLB)] = {
132 [C(OP_READ)] = {
133 [C(RESULT_ACCESS)] = -1,
134 [C(RESULT_MISS) ] = -1,
135 },
136 [C(OP_WRITE)] = {
137 [C(RESULT_ACCESS)] = -1,
138 [C(RESULT_MISS) ] = -1,
139 },
140 [C(OP_PREFETCH)] = {
141 [C(RESULT_ACCESS)] = -1,
142 [C(RESULT_MISS) ] = -1,
143 },
144 },
145
146 [C(ITLB)] = {
147 [C(OP_READ)] = {
148 [C(RESULT_ACCESS)] = -1,
149 [C(RESULT_MISS) ] = -1,
150 },
151 [C(OP_WRITE)] = {
152 [C(RESULT_ACCESS)] = -1,
153 [C(RESULT_MISS) ] = -1,
154 },
155 [C(OP_PREFETCH)] = {
156 [C(RESULT_ACCESS)] = -1,
157 [C(RESULT_MISS) ] = -1,
158 },
159 },
160
161 [C(BPU)] = {
162 [C(OP_READ)] = {
163 [C(RESULT_ACCESS)] = -1,
164 [C(RESULT_MISS) ] = -1,
165 },
166 [C(OP_WRITE)] = {
167 [C(RESULT_ACCESS)] = -1,
168 [C(RESULT_MISS) ] = -1,
169 },
170 [C(OP_PREFETCH)] = {
171 [C(RESULT_ACCESS)] = -1,
172 [C(RESULT_MISS) ] = -1,
173 },
174 },
175};
176
177const char *perf_pmu_name(void)
178{
179 return "bfin";
180}
181EXPORT_SYMBOL(perf_pmu_name);
182
183int perf_num_counters(void)
184{
185 return ARRAY_SIZE(event_map);
186}
187EXPORT_SYMBOL(perf_num_counters);
188
189static u64 bfin_pfmon_read(int idx)
190{
191 return bfin_read32(PFCNTR0 + (idx * 4));
192}
193
194static void bfin_pfmon_disable(struct hw_perf_event *hwc, int idx)
195{
196 bfin_write_PFCTL(bfin_read_PFCTL() & ~PFCEN(idx, PFCEN_MASK));
197}
198
199static void bfin_pfmon_enable(struct hw_perf_event *hwc, int idx)
200{
201 u32 val, mask;
202
203 val = PFPWR;
204 if (idx) {
205 mask = ~(PFCNT1 | PFMON1 | PFCEN1 | PEMUSW1);
206 /* The packed config is for event0, so shift it to event1 slots */
207 val |= (hwc->config << (PFMON1_P - PFMON0_P));
208 val |= (hwc->config & PFCNT0) << (PFCNT1_P - PFCNT0_P);
209 bfin_write_PFCNTR1(0);
210 } else {
211 mask = ~(PFCNT0 | PFMON0 | PFCEN0 | PEMUSW0);
212 val |= hwc->config;
213 bfin_write_PFCNTR0(0);
214 }
215
216 bfin_write_PFCTL((bfin_read_PFCTL() & mask) | val);
217}
218
219static void bfin_pfmon_disable_all(void)
220{
221 bfin_write_PFCTL(bfin_read_PFCTL() & ~PFPWR);
222}
223
224static void bfin_pfmon_enable_all(void)
225{
226 bfin_write_PFCTL(bfin_read_PFCTL() | PFPWR);
227}
228
229struct cpu_hw_events {
230 struct perf_event *events[MAX_HWEVENTS];
231 unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
232};
233DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
234
235static int hw_perf_cache_event(int config, int *evp)
236{
237 unsigned long type, op, result;
238 int ev;
239
240 /* unpack config */
241 type = config & 0xff;
242 op = (config >> 8) & 0xff;
243 result = (config >> 16) & 0xff;
244
245 if (type >= PERF_COUNT_HW_CACHE_MAX ||
246 op >= PERF_COUNT_HW_CACHE_OP_MAX ||
247 result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
248 return -EINVAL;
249
250 ev = cache_events[type][op][result];
251 if (ev == 0)
252 return -EOPNOTSUPP;
253 if (ev == -1)
254 return -EINVAL;
255 *evp = ev;
256 return 0;
257}
258
259static void bfin_perf_event_update(struct perf_event *event,
260 struct hw_perf_event *hwc, int idx)
261{
262 u64 prev_raw_count, new_raw_count;
263 s64 delta;
264 int shift = 0;
265
266 /*
267 * Depending on the counter configuration, they may or may not
268 * be chained, in which case the previous counter value can be
269 * updated underneath us if the lower-half overflows.
270 *
271 * Our tactic to handle this is to first atomically read and
272 * exchange a new raw count - then add that new-prev delta
273 * count to the generic counter atomically.
274 *
275 * As there is no interrupt associated with the overflow events,
276 * this is the simplest approach for maintaining consistency.
277 */
278again:
279 prev_raw_count = local64_read(&hwc->prev_count);
280 new_raw_count = bfin_pfmon_read(idx);
281
282 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
283 new_raw_count) != prev_raw_count)
284 goto again;
285
286 /*
287 * Now we have the new raw value and have updated the prev
288 * timestamp already. We can now calculate the elapsed delta
289 * (counter-)time and add that to the generic counter.
290 *
291 * Careful, not all hw sign-extends above the physical width
292 * of the count.
293 */
294 delta = (new_raw_count << shift) - (prev_raw_count << shift);
295 delta >>= shift;
296
297 local64_add(delta, &event->count);
298}
299
300static void bfin_pmu_stop(struct perf_event *event, int flags)
301{
302 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
303 struct hw_perf_event *hwc = &event->hw;
304 int idx = hwc->idx;
305
306 if (!(event->hw.state & PERF_HES_STOPPED)) {
307 bfin_pfmon_disable(hwc, idx);
308 cpuc->events[idx] = NULL;
309 event->hw.state |= PERF_HES_STOPPED;
310 }
311
312 if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
313 bfin_perf_event_update(event, &event->hw, idx);
314 event->hw.state |= PERF_HES_UPTODATE;
315 }
316}
317
318static void bfin_pmu_start(struct perf_event *event, int flags)
319{
320 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
321 struct hw_perf_event *hwc = &event->hw;
322 int idx = hwc->idx;
323
324 if (WARN_ON_ONCE(idx == -1))
325 return;
326
327 if (flags & PERF_EF_RELOAD)
328 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
329
330 cpuc->events[idx] = event;
331 event->hw.state = 0;
332 bfin_pfmon_enable(hwc, idx);
333}
334
335static void bfin_pmu_del(struct perf_event *event, int flags)
336{
337 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
338
339 bfin_pmu_stop(event, PERF_EF_UPDATE);
340 __clear_bit(event->hw.idx, cpuc->used_mask);
341
342 perf_event_update_userpage(event);
343}
344
345static int bfin_pmu_add(struct perf_event *event, int flags)
346{
347 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
348 struct hw_perf_event *hwc = &event->hw;
349 int idx = hwc->idx;
350 int ret = -EAGAIN;
351
352 perf_pmu_disable(event->pmu);
353
354 if (__test_and_set_bit(idx, cpuc->used_mask)) {
355 idx = find_first_zero_bit(cpuc->used_mask, MAX_HWEVENTS);
356 if (idx == MAX_HWEVENTS)
357 goto out;
358
359 __set_bit(idx, cpuc->used_mask);
360 hwc->idx = idx;
361 }
362
363 bfin_pfmon_disable(hwc, idx);
364
365 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
366 if (flags & PERF_EF_START)
367 bfin_pmu_start(event, PERF_EF_RELOAD);
368
369 perf_event_update_userpage(event);
370 ret = 0;
371out:
372 perf_pmu_enable(event->pmu);
373 return ret;
374}
375
376static void bfin_pmu_read(struct perf_event *event)
377{
378 bfin_perf_event_update(event, &event->hw, event->hw.idx);
379}
380
381static int bfin_pmu_event_init(struct perf_event *event)
382{
383 struct perf_event_attr *attr = &event->attr;
384 struct hw_perf_event *hwc = &event->hw;
385 int config = -1;
386 int ret;
387
388 if (attr->exclude_hv || attr->exclude_idle)
389 return -EPERM;
390
391 /*
392 * All of the on-chip counters are "limited", in that they have
393 * no interrupts, and are therefore unable to do sampling without
394 * further work and timer assistance.
395 */
396 if (hwc->sample_period)
397 return -EINVAL;
398
399 ret = 0;
400 switch (attr->type) {
401 case PERF_TYPE_RAW:
402 config = PFMON(0, attr->config & PFMON_MASK) |
403 PFCNT(0, !(attr->config & 0x100));
404 break;
405 case PERF_TYPE_HW_CACHE:
406 ret = hw_perf_cache_event(attr->config, &config);
407 break;
408 case PERF_TYPE_HARDWARE:
409 if (attr->config >= ARRAY_SIZE(event_map))
410 return -EINVAL;
411
412 config = event_map[attr->config];
413 break;
414 }
415
416 if (config == -1)
417 return -EINVAL;
418
419 if (!attr->exclude_kernel)
420 config |= PFCEN(0, PFCEN_ENABLE_SUPV);
421 if (!attr->exclude_user)
422 config |= PFCEN(0, PFCEN_ENABLE_USER);
423
424 hwc->config |= config;
425
426 return ret;
427}
428
429static void bfin_pmu_enable(struct pmu *pmu)
430{
431 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
432 struct perf_event *event;
433 struct hw_perf_event *hwc;
434 int i;
435
436 for (i = 0; i < MAX_HWEVENTS; ++i) {
437 event = cpuc->events[i];
438 if (!event)
439 continue;
440 hwc = &event->hw;
441 bfin_pfmon_enable(hwc, hwc->idx);
442 }
443
444 bfin_pfmon_enable_all();
445}
446
447static void bfin_pmu_disable(struct pmu *pmu)
448{
449 bfin_pfmon_disable_all();
450}
451
452static struct pmu pmu = {
453 .pmu_enable = bfin_pmu_enable,
454 .pmu_disable = bfin_pmu_disable,
455 .event_init = bfin_pmu_event_init,
456 .add = bfin_pmu_add,
457 .del = bfin_pmu_del,
458 .start = bfin_pmu_start,
459 .stop = bfin_pmu_stop,
460 .read = bfin_pmu_read,
461};
462
463static void bfin_pmu_setup(int cpu)
464{
465 struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
466
467 memset(cpuhw, 0, sizeof(struct cpu_hw_events));
468}
469
470static int __cpuinit
471bfin_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
472{
473 unsigned int cpu = (long)hcpu;
474
475 switch (action & ~CPU_TASKS_FROZEN) {
476 case CPU_UP_PREPARE:
477 bfin_write_PFCTL(0);
478 bfin_pmu_setup(cpu);
479 break;
480
481 default:
482 break;
483 }
484
485 return NOTIFY_OK;
486}
487
488static int __init bfin_pmu_init(void)
489{
490 int ret;
491
492 ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
493 if (!ret)
494 perf_cpu_notifier(bfin_pmu_notifier);
495
496 return ret;
497}
498early_initcall(bfin_pmu_init);
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index b407bc8ad918..6a660fa921b5 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -171,10 +171,8 @@ asmlinkage int bfin_clone(struct pt_regs *regs)
171 unsigned long newsp; 171 unsigned long newsp;
172 172
173#ifdef __ARCH_SYNC_CORE_DCACHE 173#ifdef __ARCH_SYNC_CORE_DCACHE
174 if (current->rt.nr_cpus_allowed == num_possible_cpus()) { 174 if (current->rt.nr_cpus_allowed == num_possible_cpus())
175 current->cpus_allowed = cpumask_of_cpu(smp_processor_id()); 175 set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
176 current->rt.nr_cpus_allowed = 1;
177 }
178#endif 176#endif
179 177
180 /* syscall2 puts clone_flags in r0 and usp in r1 */ 178 /* syscall2 puts clone_flags in r0 and usp in r1 */
diff --git a/arch/blackfin/kernel/reboot.c b/arch/blackfin/kernel/reboot.c
index 53d08dee8531..488bdc51aaa5 100644
--- a/arch/blackfin/kernel/reboot.c
+++ b/arch/blackfin/kernel/reboot.c
@@ -23,6 +23,9 @@
23__attribute__ ((__l1_text__, __noreturn__)) 23__attribute__ ((__l1_text__, __noreturn__))
24static void bfin_reset(void) 24static void bfin_reset(void)
25{ 25{
26 if (!ANOMALY_05000353 && !ANOMALY_05000386)
27 bfrom_SoftReset((void *)(L1_SCRATCH_START + L1_SCRATCH_LENGTH - 20));
28
26 /* Wait for completion of "system" events such as cache line 29 /* Wait for completion of "system" events such as cache line
27 * line fills so that we avoid infinite stalls later on as 30 * line fills so that we avoid infinite stalls later on as
28 * much as possible. This code is in L1, so it won't trigger 31 * much as possible. This code is in L1, so it won't trigger
@@ -30,46 +33,40 @@ static void bfin_reset(void)
30 */ 33 */
31 __builtin_bfin_ssync(); 34 __builtin_bfin_ssync();
32 35
33 /* The bootrom checks to see how it was reset and will 36 /* Initiate System software reset. */
34 * automatically perform a software reset for us when 37 bfin_write_SWRST(0x7);
35 * it starts executing after the core reset.
36 */
37 if (ANOMALY_05000353 || ANOMALY_05000386) {
38 /* Initiate System software reset. */
39 bfin_write_SWRST(0x7);
40 38
41 /* Due to the way reset is handled in the hardware, we need 39 /* Due to the way reset is handled in the hardware, we need
42 * to delay for 10 SCLKS. The only reliable way to do this is 40 * to delay for 10 SCLKS. The only reliable way to do this is
43 * to calculate the CCLK/SCLK ratio and multiply 10. For now, 41 * to calculate the CCLK/SCLK ratio and multiply 10. For now,
44 * we'll assume worse case which is a 1:15 ratio. 42 * we'll assume worse case which is a 1:15 ratio.
45 */ 43 */
46 asm( 44 asm(
47 "LSETUP (1f, 1f) LC0 = %0\n" 45 "LSETUP (1f, 1f) LC0 = %0\n"
48 "1: nop;" 46 "1: nop;"
49 : 47 :
50 : "a" (15 * 10) 48 : "a" (15 * 10)
51 : "LC0", "LB0", "LT0" 49 : "LC0", "LB0", "LT0"
52 ); 50 );
53 51
54 /* Clear System software reset */ 52 /* Clear System software reset */
55 bfin_write_SWRST(0); 53 bfin_write_SWRST(0);
56 54
57 /* The BF526 ROM will crash during reset */ 55 /* The BF526 ROM will crash during reset */
58#if defined(__ADSPBF522__) || defined(__ADSPBF524__) || defined(__ADSPBF526__) 56#if defined(__ADSPBF522__) || defined(__ADSPBF524__) || defined(__ADSPBF526__)
59 bfin_read_SWRST(); 57 bfin_read_SWRST();
60#endif 58#endif
61 59
62 /* Wait for the SWRST write to complete. Cannot rely on SSYNC 60 /* Wait for the SWRST write to complete. Cannot rely on SSYNC
63 * though as the System state is all reset now. 61 * though as the System state is all reset now.
64 */ 62 */
65 asm( 63 asm(
66 "LSETUP (1f, 1f) LC1 = %0\n" 64 "LSETUP (1f, 1f) LC1 = %0\n"
67 "1: nop;" 65 "1: nop;"
68 : 66 :
69 : "a" (15 * 1) 67 : "a" (15 * 1)
70 : "LC1", "LB1", "LT1" 68 : "LC1", "LB1", "LT1"
71 ); 69 );
72 }
73 70
74 while (1) 71 while (1)
75 /* Issue core reset */ 72 /* Issue core reset */
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 805c6132c779..536bd9d7e0cf 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -29,6 +29,7 @@
29#include <asm/cpu.h> 29#include <asm/cpu.h>
30#include <asm/fixed_code.h> 30#include <asm/fixed_code.h>
31#include <asm/early_printk.h> 31#include <asm/early_printk.h>
32#include <asm/irq_handler.h>
32 33
33u16 _bfin_swrst; 34u16 _bfin_swrst;
34EXPORT_SYMBOL(_bfin_swrst); 35EXPORT_SYMBOL(_bfin_swrst);
@@ -105,6 +106,8 @@ void __cpuinit bfin_setup_caches(unsigned int cpu)
105 bfin_dcache_init(dcplb_tbl[cpu]); 106 bfin_dcache_init(dcplb_tbl[cpu]);
106#endif 107#endif
107 108
109 bfin_setup_cpudata(cpu);
110
108 /* 111 /*
109 * In cache coherence emulation mode, we need to have the 112 * In cache coherence emulation mode, we need to have the
110 * D-cache enabled before running any atomic operation which 113 * D-cache enabled before running any atomic operation which
@@ -163,7 +166,6 @@ void __cpuinit bfin_setup_cpudata(unsigned int cpu)
163{ 166{
164 struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu); 167 struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);
165 168
166 cpudata->idle = current;
167 cpudata->imemctl = bfin_read_IMEM_CONTROL(); 169 cpudata->imemctl = bfin_read_IMEM_CONTROL();
168 cpudata->dmemctl = bfin_read_DMEM_CONTROL(); 170 cpudata->dmemctl = bfin_read_DMEM_CONTROL();
169} 171}
@@ -851,6 +853,7 @@ void __init native_machine_early_platform_add_devices(void)
851 853
852void __init setup_arch(char **cmdline_p) 854void __init setup_arch(char **cmdline_p)
853{ 855{
856 u32 mmr;
854 unsigned long sclk, cclk; 857 unsigned long sclk, cclk;
855 858
856 native_machine_early_platform_add_devices(); 859 native_machine_early_platform_add_devices();
@@ -902,10 +905,10 @@ void __init setup_arch(char **cmdline_p)
902 bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL); 905 bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL);
903#endif 906#endif
904#ifdef CONFIG_BFIN_HYSTERESIS_CONTROL 907#ifdef CONFIG_BFIN_HYSTERESIS_CONTROL
905 bfin_write_PORTF_HYSTERISIS(HYST_PORTF_0_15); 908 bfin_write_PORTF_HYSTERESIS(HYST_PORTF_0_15);
906 bfin_write_PORTG_HYSTERISIS(HYST_PORTG_0_15); 909 bfin_write_PORTG_HYSTERESIS(HYST_PORTG_0_15);
907 bfin_write_PORTH_HYSTERISIS(HYST_PORTH_0_15); 910 bfin_write_PORTH_HYSTERESIS(HYST_PORTH_0_15);
908 bfin_write_MISCPORT_HYSTERISIS((bfin_read_MISCPORT_HYSTERISIS() & 911 bfin_write_MISCPORT_HYSTERESIS((bfin_read_MISCPORT_HYSTERESIS() &
909 ~HYST_NONEGPIO_MASK) | HYST_NONEGPIO); 912 ~HYST_NONEGPIO_MASK) | HYST_NONEGPIO);
910#endif 913#endif
911 914
@@ -921,17 +924,14 @@ void __init setup_arch(char **cmdline_p)
921 bfin_read_IMDMA_D1_IRQ_STATUS(); 924 bfin_read_IMDMA_D1_IRQ_STATUS();
922 } 925 }
923#endif 926#endif
924 printk(KERN_INFO "Hardware Trace ");
925 if (bfin_read_TBUFCTL() & 0x1)
926 printk(KERN_CONT "Active ");
927 else
928 printk(KERN_CONT "Off ");
929 if (bfin_read_TBUFCTL() & 0x2)
930 printk(KERN_CONT "and Enabled\n");
931 else
932 printk(KERN_CONT "and Disabled\n");
933 927
934 printk(KERN_INFO "Boot Mode: %i\n", bfin_read_SYSCR() & 0xF); 928 mmr = bfin_read_TBUFCTL();
929 printk(KERN_INFO "Hardware Trace %s and %sabled\n",
930 (mmr & 0x1) ? "active" : "off",
931 (mmr & 0x2) ? "en" : "dis");
932
933 mmr = bfin_read_SYSCR();
934 printk(KERN_INFO "Boot Mode: %i\n", mmr & 0xF);
935 935
936 /* Newer parts mirror SWRST bits in SYSCR */ 936 /* Newer parts mirror SWRST bits in SYSCR */
937#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \ 937#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \
@@ -939,7 +939,7 @@ void __init setup_arch(char **cmdline_p)
939 _bfin_swrst = bfin_read_SWRST(); 939 _bfin_swrst = bfin_read_SWRST();
940#else 940#else
941 /* Clear boot mode field */ 941 /* Clear boot mode field */
942 _bfin_swrst = bfin_read_SYSCR() & ~0xf; 942 _bfin_swrst = mmr & ~0xf;
943#endif 943#endif
944 944
945#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT 945#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
@@ -1036,8 +1036,6 @@ void __init setup_arch(char **cmdline_p)
1036static int __init topology_init(void) 1036static int __init topology_init(void)
1037{ 1037{
1038 unsigned int cpu; 1038 unsigned int cpu;
1039 /* Record CPU-private information for the boot processor. */
1040 bfin_setup_cpudata(0);
1041 1039
1042 for_each_possible_cpu(cpu) { 1040 for_each_possible_cpu(cpu) {
1043 register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu); 1041 register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
@@ -1283,12 +1281,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1283 dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS, 1281 dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
1284 BFIN_DLINES); 1282 BFIN_DLINES);
1285#ifdef __ARCH_SYNC_CORE_DCACHE 1283#ifdef __ARCH_SYNC_CORE_DCACHE
1286 seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", dcache_invld_count[cpu_num]); 1284 seq_printf(m, "dcache flushes\t: %lu\n", dcache_invld_count[cpu_num]);
1287#endif 1285#endif
1288#ifdef __ARCH_SYNC_CORE_ICACHE 1286#ifdef __ARCH_SYNC_CORE_ICACHE
1289 seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", icache_invld_count[cpu_num]); 1287 seq_printf(m, "icache flushes\t: %lu\n", icache_invld_count[cpu_num]);
1290#endif 1288#endif
1291 1289
1290 seq_printf(m, "\n");
1291
1292 if (cpu_num != num_possible_cpus() - 1) 1292 if (cpu_num != num_possible_cpus() - 1)
1293 return 0; 1293 return 0;
1294 1294
@@ -1312,13 +1312,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1312 " in data cache\n"); 1312 " in data cache\n");
1313 } 1313 }
1314 seq_printf(m, "board name\t: %s\n", bfin_board_name); 1314 seq_printf(m, "board name\t: %s\n", bfin_board_name);
1315 seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n", 1315 seq_printf(m, "board memory\t: %ld kB (0x%08lx -> 0x%08lx)\n",
1316 physical_mem_end >> 10, (void *)0, (void *)physical_mem_end); 1316 physical_mem_end >> 10, 0ul, physical_mem_end);
1317 seq_printf(m, "kernel memory\t: %d kB (0x%p -> 0x%p)\n", 1317 seq_printf(m, "kernel memory\t: %d kB (0x%08lx -> 0x%08lx)\n",
1318 ((int)memory_end - (int)_rambase) >> 10, 1318 ((int)memory_end - (int)_rambase) >> 10,
1319 (void *)_rambase, 1319 _rambase, memory_end);
1320 (void *)memory_end);
1321 seq_printf(m, "\n");
1322 1320
1323 return 0; 1321 return 0;
1324} 1322}
@@ -1326,7 +1324,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1326static void *c_start(struct seq_file *m, loff_t *pos) 1324static void *c_start(struct seq_file *m, loff_t *pos)
1327{ 1325{
1328 if (*pos == 0) 1326 if (*pos == 0)
1329 *pos = first_cpu(cpu_online_map); 1327 *pos = cpumask_first(cpu_online_mask);
1330 if (*pos >= num_online_cpus()) 1328 if (*pos >= num_online_cpus())
1331 return NULL; 1329 return NULL;
1332 1330
@@ -1335,7 +1333,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
1335 1333
1336static void *c_next(struct seq_file *m, void *v, loff_t *pos) 1334static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1337{ 1335{
1338 *pos = next_cpu(*pos, cpu_online_map); 1336 *pos = cpumask_next(*pos, cpu_online_mask);
1339 1337
1340 return c_start(m, pos); 1338 return c_start(m, pos);
1341} 1339}
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 8d85c8c6f857..3ac5b66d14aa 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -155,14 +155,8 @@ SECTIONS
155 SECURITY_INITCALL 155 SECURITY_INITCALL
156 INIT_RAM_FS 156 INIT_RAM_FS
157 157
158 . = ALIGN(4);
159 ___per_cpu_load = .; 158 ___per_cpu_load = .;
160 ___per_cpu_start = .; 159 PERCPU_INPUT(32)
161 *(.data.percpu.first)
162 *(.data.percpu.page_aligned)
163 *(.data.percpu)
164 *(.data.percpu.shared_aligned)
165 ___per_cpu_end = .;
166 160
167 EXIT_DATA 161 EXIT_DATA
168 __einitdata = .; 162 __einitdata = .;
diff --git a/arch/blackfin/mach-bf518/include/mach/anomaly.h b/arch/blackfin/mach-bf518/include/mach/anomaly.h
index 24918c5f7ea1..d2f076fbbc9e 100644
--- a/arch/blackfin/mach-bf518/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf518/include/mach/anomaly.h
@@ -5,7 +5,7 @@
5 * and can be replaced with that version at any time 5 * and can be replaced with that version at any time
6 * DO NOT EDIT THIS FILE 6 * DO NOT EDIT THIS FILE
7 * 7 *
8 * Copyright 2004-2010 Analog Devices Inc. 8 * Copyright 2004-2011 Analog Devices Inc.
9 * Licensed under the ADI BSD license. 9 * Licensed under the ADI BSD license.
10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd 10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
11 */ 11 */
@@ -141,6 +141,7 @@
141#define ANOMALY_05000364 (0) 141#define ANOMALY_05000364 (0)
142#define ANOMALY_05000371 (0) 142#define ANOMALY_05000371 (0)
143#define ANOMALY_05000380 (0) 143#define ANOMALY_05000380 (0)
144#define ANOMALY_05000383 (0)
144#define ANOMALY_05000386 (0) 145#define ANOMALY_05000386 (0)
145#define ANOMALY_05000389 (0) 146#define ANOMALY_05000389 (0)
146#define ANOMALY_05000400 (0) 147#define ANOMALY_05000400 (0)
@@ -155,6 +156,7 @@
155#define ANOMALY_05000467 (0) 156#define ANOMALY_05000467 (0)
156#define ANOMALY_05000474 (0) 157#define ANOMALY_05000474 (0)
157#define ANOMALY_05000475 (0) 158#define ANOMALY_05000475 (0)
159#define ANOMALY_05000480 (0)
158#define ANOMALY_05000485 (0) 160#define ANOMALY_05000485 (0)
159 161
160#endif 162#endif
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF512.h b/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
index b657d37a3402..bb79627f0929 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
@@ -990,18 +990,18 @@
990#define bfin_write_PORTG_SLEW(val) bfin_write16(PORTG_SLEW, val) 990#define bfin_write_PORTG_SLEW(val) bfin_write16(PORTG_SLEW, val)
991#define bfin_read_PORTH_SLEW() bfin_read16(PORTH_SLEW) 991#define bfin_read_PORTH_SLEW() bfin_read16(PORTH_SLEW)
992#define bfin_write_PORTH_SLEW(val) bfin_write16(PORTH_SLEW, val) 992#define bfin_write_PORTH_SLEW(val) bfin_write16(PORTH_SLEW, val)
993#define bfin_read_PORTF_HYSTERISIS() bfin_read16(PORTF_HYSTERISIS) 993#define bfin_read_PORTF_HYSTERESIS() bfin_read16(PORTF_HYSTERESIS)
994#define bfin_write_PORTF_HYSTERISIS(val) bfin_write16(PORTF_HYSTERISIS, val) 994#define bfin_write_PORTF_HYSTERESIS(val) bfin_write16(PORTF_HYSTERESIS, val)
995#define bfin_read_PORTG_HYSTERISIS() bfin_read16(PORTG_HYSTERISIS) 995#define bfin_read_PORTG_HYSTERESIS() bfin_read16(PORTG_HYSTERESIS)
996#define bfin_write_PORTG_HYSTERISIS(val) bfin_write16(PORTG_HYSTERISIS, val) 996#define bfin_write_PORTG_HYSTERESIS(val) bfin_write16(PORTG_HYSTERESIS, val)
997#define bfin_read_PORTH_HYSTERISIS() bfin_read16(PORTH_HYSTERISIS) 997#define bfin_read_PORTH_HYSTERESIS() bfin_read16(PORTH_HYSTERESIS)
998#define bfin_write_PORTH_HYSTERISIS(val) bfin_write16(PORTH_HYSTERISIS, val) 998#define bfin_write_PORTH_HYSTERESIS(val) bfin_write16(PORTH_HYSTERESIS, val)
999#define bfin_read_MISCPORT_DRIVE() bfin_read16(MISCPORT_DRIVE) 999#define bfin_read_MISCPORT_DRIVE() bfin_read16(MISCPORT_DRIVE)
1000#define bfin_write_MISCPORT_DRIVE(val) bfin_write16(MISCPORT_DRIVE, val) 1000#define bfin_write_MISCPORT_DRIVE(val) bfin_write16(MISCPORT_DRIVE, val)
1001#define bfin_read_MISCPORT_SLEW() bfin_read16(MISCPORT_SLEW) 1001#define bfin_read_MISCPORT_SLEW() bfin_read16(MISCPORT_SLEW)
1002#define bfin_write_MISCPORT_SLEW(val) bfin_write16(MISCPORT_SLEW, val) 1002#define bfin_write_MISCPORT_SLEW(val) bfin_write16(MISCPORT_SLEW, val)
1003#define bfin_read_MISCPORT_HYSTERISIS() bfin_read16(MISCPORT_HYSTERISIS) 1003#define bfin_read_MISCPORT_HYSTERESIS() bfin_read16(MISCPORT_HYSTERESIS)
1004#define bfin_write_MISCPORT_HYSTERISIS(val) bfin_write16(MISCPORT_HYSTERISIS, val) 1004#define bfin_write_MISCPORT_HYSTERESIS(val) bfin_write16(MISCPORT_HYSTERESIS, val)
1005 1005
1006/* HOST Port Registers */ 1006/* HOST Port Registers */
1007 1007
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF512.h b/arch/blackfin/mach-bf518/include/mach/defBF512.h
index cb1172f50757..729704078cd7 100644
--- a/arch/blackfin/mach-bf518/include/mach/defBF512.h
+++ b/arch/blackfin/mach-bf518/include/mach/defBF512.h
@@ -561,12 +561,12 @@
561#define PORTF_SLEW 0xFFC03230 /* Port F slew control */ 561#define PORTF_SLEW 0xFFC03230 /* Port F slew control */
562#define PORTG_SLEW 0xFFC03234 /* Port G slew control */ 562#define PORTG_SLEW 0xFFC03234 /* Port G slew control */
563#define PORTH_SLEW 0xFFC03238 /* Port H slew control */ 563#define PORTH_SLEW 0xFFC03238 /* Port H slew control */
564#define PORTF_HYSTERISIS 0xFFC03240 /* Port F Schmitt trigger control */ 564#define PORTF_HYSTERESIS 0xFFC03240 /* Port F Schmitt trigger control */
565#define PORTG_HYSTERISIS 0xFFC03244 /* Port G Schmitt trigger control */ 565#define PORTG_HYSTERESIS 0xFFC03244 /* Port G Schmitt trigger control */
566#define PORTH_HYSTERISIS 0xFFC03248 /* Port H Schmitt trigger control */ 566#define PORTH_HYSTERESIS 0xFFC03248 /* Port H Schmitt trigger control */
567#define MISCPORT_DRIVE 0xFFC03280 /* Misc Port drive strength control */ 567#define MISCPORT_DRIVE 0xFFC03280 /* Misc Port drive strength control */
568#define MISCPORT_SLEW 0xFFC03284 /* Misc Port slew control */ 568#define MISCPORT_SLEW 0xFFC03284 /* Misc Port slew control */
569#define MISCPORT_HYSTERISIS 0xFFC03288 /* Misc Port Schmitt trigger control */ 569#define MISCPORT_HYSTERESIS 0xFFC03288 /* Misc Port Schmitt trigger control */
570 570
571 571
572/*********************************************************************************** 572/***********************************************************************************
diff --git a/arch/blackfin/mach-bf518/include/mach/irq.h b/arch/blackfin/mach-bf518/include/mach/irq.h
index 435e76e31aaa..edf8efd457dc 100644
--- a/arch/blackfin/mach-bf518/include/mach/irq.h
+++ b/arch/blackfin/mach-bf518/include/mach/irq.h
@@ -7,38 +7,9 @@
7#ifndef _BF518_IRQ_H_ 7#ifndef _BF518_IRQ_H_
8#define _BF518_IRQ_H_ 8#define _BF518_IRQ_H_
9 9
10/* 10#include <mach-common/irq.h>
11 * Interrupt source definitions 11
12 Event Source Core Event Name 12#define NR_PERI_INTS (2 * 32)
13 Core Emulation **
14 Events (highest priority) EMU 0
15 Reset RST 1
16 NMI NMI 2
17 Exception EVX 3
18 Reserved -- 4
19 Hardware Error IVHW 5
20 Core Timer IVTMR 6 *
21
22 .....
23
24 Software Interrupt 1 IVG14 31
25 Software Interrupt 2 --
26 (lowest priority) IVG15 32 *
27*/
28
29#define NR_PERI_INTS (2 * 32)
30
31/* The ABSTRACT IRQ definitions */
32/** the first seven of the following are fixed, the rest you change if you need to **/
33#define IRQ_EMU 0 /* Emulation */
34#define IRQ_RST 1 /* reset */
35#define IRQ_NMI 2 /* Non Maskable */
36#define IRQ_EVX 3 /* Exception */
37#define IRQ_UNUSED 4 /* - unused interrupt */
38#define IRQ_HWERR 5 /* Hardware Error */
39#define IRQ_CORETMR 6 /* Core timer */
40
41#define BFIN_IRQ(x) ((x) + 7)
42 13
43#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */ 14#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */
44#define IRQ_DMA0_ERROR BFIN_IRQ(1) /* DMA Error 0 (generic) */ 15#define IRQ_DMA0_ERROR BFIN_IRQ(1) /* DMA Error 0 (generic) */
@@ -54,23 +25,23 @@
54#define IRQ_UART0_ERROR BFIN_IRQ(12) /* UART0 Status */ 25#define IRQ_UART0_ERROR BFIN_IRQ(12) /* UART0 Status */
55#define IRQ_UART1_ERROR BFIN_IRQ(13) /* UART1 Status */ 26#define IRQ_UART1_ERROR BFIN_IRQ(13) /* UART1 Status */
56#define IRQ_RTC BFIN_IRQ(14) /* RTC */ 27#define IRQ_RTC BFIN_IRQ(14) /* RTC */
57#define IRQ_PPI BFIN_IRQ(15) /* DMA Channel 0 (PPI) */ 28#define IRQ_PPI BFIN_IRQ(15) /* DMA Channel 0 (PPI) */
58#define IRQ_SPORT0_RX BFIN_IRQ(16) /* DMA 3 Channel (SPORT0 RX) */ 29#define IRQ_SPORT0_RX BFIN_IRQ(16) /* DMA 3 Channel (SPORT0 RX) */
59#define IRQ_SPORT0_TX BFIN_IRQ(17) /* DMA 4 Channel (SPORT0 TX) */ 30#define IRQ_SPORT0_TX BFIN_IRQ(17) /* DMA 4 Channel (SPORT0 TX) */
60#define IRQ_RSI BFIN_IRQ(17) /* DMA 4 Channel (RSI) */ 31#define IRQ_RSI BFIN_IRQ(17) /* DMA 4 Channel (RSI) */
61#define IRQ_SPORT1_RX BFIN_IRQ(18) /* DMA 5 Channel (SPORT1 RX/SPI) */ 32#define IRQ_SPORT1_RX BFIN_IRQ(18) /* DMA 5 Channel (SPORT1 RX/SPI) */
62#define IRQ_SPI1 BFIN_IRQ(18) /* DMA 5 Channel (SPI1) */ 33#define IRQ_SPI1 BFIN_IRQ(18) /* DMA 5 Channel (SPI1) */
63#define IRQ_SPORT1_TX BFIN_IRQ(19) /* DMA 6 Channel (SPORT1 TX) */ 34#define IRQ_SPORT1_TX BFIN_IRQ(19) /* DMA 6 Channel (SPORT1 TX) */
64#define IRQ_TWI BFIN_IRQ(20) /* TWI */ 35#define IRQ_TWI BFIN_IRQ(20) /* TWI */
65#define IRQ_SPI0 BFIN_IRQ(21) /* DMA 7 Channel (SPI0) */ 36#define IRQ_SPI0 BFIN_IRQ(21) /* DMA 7 Channel (SPI0) */
66#define IRQ_UART0_RX BFIN_IRQ(22) /* DMA8 Channel (UART0 RX) */ 37#define IRQ_UART0_RX BFIN_IRQ(22) /* DMA8 Channel (UART0 RX) */
67#define IRQ_UART0_TX BFIN_IRQ(23) /* DMA9 Channel (UART0 TX) */ 38#define IRQ_UART0_TX BFIN_IRQ(23) /* DMA9 Channel (UART0 TX) */
68#define IRQ_UART1_RX BFIN_IRQ(24) /* DMA10 Channel (UART1 RX) */ 39#define IRQ_UART1_RX BFIN_IRQ(24) /* DMA10 Channel (UART1 RX) */
69#define IRQ_UART1_TX BFIN_IRQ(25) /* DMA11 Channel (UART1 TX) */ 40#define IRQ_UART1_TX BFIN_IRQ(25) /* DMA11 Channel (UART1 TX) */
70#define IRQ_OPTSEC BFIN_IRQ(26) /* OTPSEC Interrupt */ 41#define IRQ_OPTSEC BFIN_IRQ(26) /* OTPSEC Interrupt */
71#define IRQ_CNT BFIN_IRQ(27) /* GP Counter */ 42#define IRQ_CNT BFIN_IRQ(27) /* GP Counter */
72#define IRQ_MAC_RX BFIN_IRQ(28) /* DMA1 Channel (MAC RX) */ 43#define IRQ_MAC_RX BFIN_IRQ(28) /* DMA1 Channel (MAC RX) */
73#define IRQ_PORTH_INTA BFIN_IRQ(29) /* Port H Interrupt A */ 44#define IRQ_PORTH_INTA BFIN_IRQ(29) /* Port H Interrupt A */
74#define IRQ_MAC_TX BFIN_IRQ(30) /* DMA2 Channel (MAC TX) */ 45#define IRQ_MAC_TX BFIN_IRQ(30) /* DMA2 Channel (MAC TX) */
75#define IRQ_PORTH_INTB BFIN_IRQ(31) /* Port H Interrupt B */ 46#define IRQ_PORTH_INTB BFIN_IRQ(31) /* Port H Interrupt B */
76#define IRQ_TIMER0 BFIN_IRQ(32) /* Timer 0 */ 47#define IRQ_TIMER0 BFIN_IRQ(32) /* Timer 0 */
@@ -96,101 +67,90 @@
96#define IRQ_PWM_SYNC BFIN_IRQ(54) /* PWM Sync Interrupt */ 67#define IRQ_PWM_SYNC BFIN_IRQ(54) /* PWM Sync Interrupt */
97#define IRQ_PTP_STAT BFIN_IRQ(55) /* PTP Stat Interrupt */ 68#define IRQ_PTP_STAT BFIN_IRQ(55) /* PTP Stat Interrupt */
98 69
99#define SYS_IRQS BFIN_IRQ(63) /* 70 */ 70#define SYS_IRQS BFIN_IRQ(63) /* 70 */
100 71
101#define IRQ_PF0 71 72#define IRQ_PF0 71
102#define IRQ_PF1 72 73#define IRQ_PF1 72
103#define IRQ_PF2 73 74#define IRQ_PF2 73
104#define IRQ_PF3 74 75#define IRQ_PF3 74
105#define IRQ_PF4 75 76#define IRQ_PF4 75
106#define IRQ_PF5 76 77#define IRQ_PF5 76
107#define IRQ_PF6 77 78#define IRQ_PF6 77
108#define IRQ_PF7 78 79#define IRQ_PF7 78
109#define IRQ_PF8 79 80#define IRQ_PF8 79
110#define IRQ_PF9 80 81#define IRQ_PF9 80
111#define IRQ_PF10 81 82#define IRQ_PF10 81
112#define IRQ_PF11 82 83#define IRQ_PF11 82
113#define IRQ_PF12 83 84#define IRQ_PF12 83
114#define IRQ_PF13 84 85#define IRQ_PF13 84
115#define IRQ_PF14 85 86#define IRQ_PF14 85
116#define IRQ_PF15 86 87#define IRQ_PF15 86
117 88
118#define IRQ_PG0 87 89#define IRQ_PG0 87
119#define IRQ_PG1 88 90#define IRQ_PG1 88
120#define IRQ_PG2 89 91#define IRQ_PG2 89
121#define IRQ_PG3 90 92#define IRQ_PG3 90
122#define IRQ_PG4 91 93#define IRQ_PG4 91
123#define IRQ_PG5 92 94#define IRQ_PG5 92
124#define IRQ_PG6 93 95#define IRQ_PG6 93
125#define IRQ_PG7 94 96#define IRQ_PG7 94
126#define IRQ_PG8 95 97#define IRQ_PG8 95
127#define IRQ_PG9 96 98#define IRQ_PG9 96
128#define IRQ_PG10 97 99#define IRQ_PG10 97
129#define IRQ_PG11 98 100#define IRQ_PG11 98
130#define IRQ_PG12 99 101#define IRQ_PG12 99
131#define IRQ_PG13 100 102#define IRQ_PG13 100
132#define IRQ_PG14 101 103#define IRQ_PG14 101
133#define IRQ_PG15 102 104#define IRQ_PG15 102
134 105
135#define IRQ_PH0 103 106#define IRQ_PH0 103
136#define IRQ_PH1 104 107#define IRQ_PH1 104
137#define IRQ_PH2 105 108#define IRQ_PH2 105
138#define IRQ_PH3 106 109#define IRQ_PH3 106
139#define IRQ_PH4 107 110#define IRQ_PH4 107
140#define IRQ_PH5 108 111#define IRQ_PH5 108
141#define IRQ_PH6 109 112#define IRQ_PH6 109
142#define IRQ_PH7 110 113#define IRQ_PH7 110
143#define IRQ_PH8 111 114#define IRQ_PH8 111
144#define IRQ_PH9 112 115#define IRQ_PH9 112
145#define IRQ_PH10 113 116#define IRQ_PH10 113
146#define IRQ_PH11 114 117#define IRQ_PH11 114
147#define IRQ_PH12 115 118#define IRQ_PH12 115
148#define IRQ_PH13 116 119#define IRQ_PH13 116
149#define IRQ_PH14 117 120#define IRQ_PH14 117
150#define IRQ_PH15 118 121#define IRQ_PH15 118
151 122
152#define GPIO_IRQ_BASE IRQ_PF0 123#define GPIO_IRQ_BASE IRQ_PF0
153 124
154#define IRQ_MAC_PHYINT 119 /* PHY_INT Interrupt */ 125#define IRQ_MAC_PHYINT 119 /* PHY_INT Interrupt */
155#define IRQ_MAC_MMCINT 120 /* MMC Counter Interrupt */ 126#define IRQ_MAC_MMCINT 120 /* MMC Counter Interrupt */
156#define IRQ_MAC_RXFSINT 121 /* RX Frame-Status Interrupt */ 127#define IRQ_MAC_RXFSINT 121 /* RX Frame-Status Interrupt */
157#define IRQ_MAC_TXFSINT 122 /* TX Frame-Status Interrupt */ 128#define IRQ_MAC_TXFSINT 122 /* TX Frame-Status Interrupt */
158#define IRQ_MAC_WAKEDET 123 /* Wake-Up Interrupt */ 129#define IRQ_MAC_WAKEDET 123 /* Wake-Up Interrupt */
159#define IRQ_MAC_RXDMAERR 124 /* RX DMA Direction Error Interrupt */ 130#define IRQ_MAC_RXDMAERR 124 /* RX DMA Direction Error Interrupt */
160#define IRQ_MAC_TXDMAERR 125 /* TX DMA Direction Error Interrupt */ 131#define IRQ_MAC_TXDMAERR 125 /* TX DMA Direction Error Interrupt */
161#define IRQ_MAC_STMDONE 126 /* Station Mgt. Transfer Done Interrupt */ 132#define IRQ_MAC_STMDONE 126 /* Station Mgt. Transfer Done Interrupt */
162 133
163#define NR_MACH_IRQS (IRQ_MAC_STMDONE + 1) 134#define NR_MACH_IRQS (IRQ_MAC_STMDONE + 1)
164#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
165
166#define IVG7 7
167#define IVG8 8
168#define IVG9 9
169#define IVG10 10
170#define IVG11 11
171#define IVG12 12
172#define IVG13 13
173#define IVG14 14
174#define IVG15 15
175 135
176/* IAR0 BIT FIELDS */ 136/* IAR0 BIT FIELDS */
177#define IRQ_PLL_WAKEUP_POS 0 137#define IRQ_PLL_WAKEUP_POS 0
178#define IRQ_DMA0_ERROR_POS 4 138#define IRQ_DMA0_ERROR_POS 4
179#define IRQ_DMAR0_BLK_POS 8 139#define IRQ_DMAR0_BLK_POS 8
180#define IRQ_DMAR1_BLK_POS 12 140#define IRQ_DMAR1_BLK_POS 12
181#define IRQ_DMAR0_OVR_POS 16 141#define IRQ_DMAR0_OVR_POS 16
182#define IRQ_DMAR1_OVR_POS 20 142#define IRQ_DMAR1_OVR_POS 20
183#define IRQ_PPI_ERROR_POS 24 143#define IRQ_PPI_ERROR_POS 24
184#define IRQ_MAC_ERROR_POS 28 144#define IRQ_MAC_ERROR_POS 28
185 145
186/* IAR1 BIT FIELDS */ 146/* IAR1 BIT FIELDS */
187#define IRQ_SPORT0_ERROR_POS 0 147#define IRQ_SPORT0_ERROR_POS 0
188#define IRQ_SPORT1_ERROR_POS 4 148#define IRQ_SPORT1_ERROR_POS 4
189#define IRQ_PTP_ERROR_POS 8 149#define IRQ_PTP_ERROR_POS 8
190#define IRQ_UART0_ERROR_POS 16 150#define IRQ_UART0_ERROR_POS 16
191#define IRQ_UART1_ERROR_POS 20 151#define IRQ_UART1_ERROR_POS 20
192#define IRQ_RTC_POS 24 152#define IRQ_RTC_POS 24
193#define IRQ_PPI_POS 28 153#define IRQ_PPI_POS 28
194 154
195/* IAR2 BIT FIELDS */ 155/* IAR2 BIT FIELDS */
196#define IRQ_SPORT0_RX_POS 0 156#define IRQ_SPORT0_RX_POS 0
@@ -199,19 +159,19 @@
199#define IRQ_SPORT1_RX_POS 8 159#define IRQ_SPORT1_RX_POS 8
200#define IRQ_SPI1_POS 8 160#define IRQ_SPI1_POS 8
201#define IRQ_SPORT1_TX_POS 12 161#define IRQ_SPORT1_TX_POS 12
202#define IRQ_TWI_POS 16 162#define IRQ_TWI_POS 16
203#define IRQ_SPI0_POS 20 163#define IRQ_SPI0_POS 20
204#define IRQ_UART0_RX_POS 24 164#define IRQ_UART0_RX_POS 24
205#define IRQ_UART0_TX_POS 28 165#define IRQ_UART0_TX_POS 28
206 166
207/* IAR3 BIT FIELDS */ 167/* IAR3 BIT FIELDS */
208#define IRQ_UART1_RX_POS 0 168#define IRQ_UART1_RX_POS 0
209#define IRQ_UART1_TX_POS 4 169#define IRQ_UART1_TX_POS 4
210#define IRQ_OPTSEC_POS 8 170#define IRQ_OPTSEC_POS 8
211#define IRQ_CNT_POS 12 171#define IRQ_CNT_POS 12
212#define IRQ_MAC_RX_POS 16 172#define IRQ_MAC_RX_POS 16
213#define IRQ_PORTH_INTA_POS 20 173#define IRQ_PORTH_INTA_POS 20
214#define IRQ_MAC_TX_POS 24 174#define IRQ_MAC_TX_POS 24
215#define IRQ_PORTH_INTB_POS 28 175#define IRQ_PORTH_INTB_POS 28
216 176
217/* IAR4 BIT FIELDS */ 177/* IAR4 BIT FIELDS */
@@ -227,19 +187,19 @@
227/* IAR5 BIT FIELDS */ 187/* IAR5 BIT FIELDS */
228#define IRQ_PORTG_INTA_POS 0 188#define IRQ_PORTG_INTA_POS 0
229#define IRQ_PORTG_INTB_POS 4 189#define IRQ_PORTG_INTB_POS 4
230#define IRQ_MEM_DMA0_POS 8 190#define IRQ_MEM_DMA0_POS 8
231#define IRQ_MEM_DMA1_POS 12 191#define IRQ_MEM_DMA1_POS 12
232#define IRQ_WATCH_POS 16 192#define IRQ_WATCH_POS 16
233#define IRQ_PORTF_INTA_POS 20 193#define IRQ_PORTF_INTA_POS 20
234#define IRQ_PORTF_INTB_POS 24 194#define IRQ_PORTF_INTB_POS 24
235#define IRQ_SPI0_ERROR_POS 28 195#define IRQ_SPI0_ERROR_POS 28
236 196
237/* IAR6 BIT FIELDS */ 197/* IAR6 BIT FIELDS */
238#define IRQ_SPI1_ERROR_POS 0 198#define IRQ_SPI1_ERROR_POS 0
239#define IRQ_RSI_INT0_POS 12 199#define IRQ_RSI_INT0_POS 12
240#define IRQ_RSI_INT1_POS 16 200#define IRQ_RSI_INT1_POS 16
241#define IRQ_PWM_TRIP_POS 20 201#define IRQ_PWM_TRIP_POS 20
242#define IRQ_PWM_SYNC_POS 24 202#define IRQ_PWM_SYNC_POS 24
243#define IRQ_PTP_STAT_POS 28 203#define IRQ_PTP_STAT_POS 28
244 204
245#endif /* _BF518_IRQ_H_ */ 205#endif
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index 2cd2ff6f3043..e67ac7720668 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -26,6 +26,7 @@
26#include <asm/portmux.h> 26#include <asm/portmux.h>
27#include <asm/dpmc.h> 27#include <asm/dpmc.h>
28#include <linux/spi/ad7877.h> 28#include <linux/spi/ad7877.h>
29#include <asm/bfin_sport.h>
29 30
30/* 31/*
31 * Name the Board for the /proc/cpuinfo 32 * Name the Board for the /proc/cpuinfo
@@ -526,11 +527,69 @@ static struct bfin5xx_spi_chip spidev_chip_info = {
526}; 527};
527#endif 528#endif
528 529
530#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
531 defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
532
533static const u16 bfin_snd_pin[][7] = {
534 {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
535 P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0, 0},
536 {P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
537 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_TFS, 0},
538};
539
540static struct bfin_snd_platform_data bfin_snd_data[] = {
541 {
542 .pin_req = &bfin_snd_pin[0][0],
543 },
544 {
545 .pin_req = &bfin_snd_pin[1][0],
546 },
547};
548
549#define BFIN_SND_RES(x) \
550 [x] = { \
551 { \
552 .start = SPORT##x##_TCR1, \
553 .end = SPORT##x##_TCR1, \
554 .flags = IORESOURCE_MEM \
555 }, \
556 { \
557 .start = CH_SPORT##x##_RX, \
558 .end = CH_SPORT##x##_RX, \
559 .flags = IORESOURCE_DMA, \
560 }, \
561 { \
562 .start = CH_SPORT##x##_TX, \
563 .end = CH_SPORT##x##_TX, \
564 .flags = IORESOURCE_DMA, \
565 }, \
566 { \
567 .start = IRQ_SPORT##x##_ERROR, \
568 .end = IRQ_SPORT##x##_ERROR, \
569 .flags = IORESOURCE_IRQ, \
570 } \
571 }
572
573static struct resource bfin_snd_resources[][4] = {
574 BFIN_SND_RES(0),
575 BFIN_SND_RES(1),
576};
577
578static struct platform_device bfin_pcm = {
579 .name = "bfin-pcm-audio",
580 .id = -1,
581};
582#endif
583
529#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 584#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
530static struct platform_device bfin_i2s = { 585static struct platform_device bfin_i2s = {
531 .name = "bfin-i2s", 586 .name = "bfin-i2s",
532 .id = CONFIG_SND_BF5XX_SPORT_NUM, 587 .id = CONFIG_SND_BF5XX_SPORT_NUM,
533 /* TODO: add platform data here */ 588 .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
589 .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
590 .dev = {
591 .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
592 },
534}; 593};
535#endif 594#endif
536 595
@@ -538,7 +597,11 @@ static struct platform_device bfin_i2s = {
538static struct platform_device bfin_tdm = { 597static struct platform_device bfin_tdm = {
539 .name = "bfin-tdm", 598 .name = "bfin-tdm",
540 .id = CONFIG_SND_BF5XX_SPORT_NUM, 599 .id = CONFIG_SND_BF5XX_SPORT_NUM,
541 /* TODO: add platform data here */ 600 .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
601 .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
602 .dev = {
603 .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
604 },
542}; 605};
543#endif 606#endif
544 607
@@ -583,7 +646,9 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
583 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ 646 .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
584 .bus_num = 0, 647 .bus_num = 0,
585 .chip_select = 4, 648 .chip_select = 4,
649 .platform_data = "ad1836",
586 .controller_data = &ad1836_spi_chip_info, 650 .controller_data = &ad1836_spi_chip_info,
651 .mode = SPI_MODE_3,
587 }, 652 },
588#endif 653#endif
589#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) 654#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
@@ -1211,6 +1276,11 @@ static struct platform_device *stamp_devices[] __initdata = {
1211 &ezkit_flash_device, 1276 &ezkit_flash_device,
1212#endif 1277#endif
1213 1278
1279#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
1280 defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
1281 &bfin_pcm,
1282#endif
1283
1214#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 1284#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
1215 &bfin_i2s, 1285 &bfin_i2s,
1216#endif 1286#endif
diff --git a/arch/blackfin/mach-bf527/include/mach/anomaly.h b/arch/blackfin/mach-bf527/include/mach/anomaly.h
index 9358afa05c90..e66a7e89cd3c 100644
--- a/arch/blackfin/mach-bf527/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf527/include/mach/anomaly.h
@@ -5,14 +5,14 @@
5 * and can be replaced with that version at any time 5 * and can be replaced with that version at any time
6 * DO NOT EDIT THIS FILE 6 * DO NOT EDIT THIS FILE
7 * 7 *
8 * Copyright 2004-2010 Analog Devices Inc. 8 * Copyright 2004-2011 Analog Devices Inc.
9 * Licensed under the ADI BSD license. 9 * Licensed under the ADI BSD license.
10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd 10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
11 */ 11 */
12 12
13/* This file should be up to date with: 13/* This file should be up to date with:
14 * - Revision E, 03/15/2010; ADSP-BF526 Blackfin Processor Anomaly List 14 * - Revision E, 03/15/2010; ADSP-BF526 Blackfin Processor Anomaly List
15 * - Revision G, 08/25/2009; ADSP-BF527 Blackfin Processor Anomaly List 15 * - Revision H, 04/29/2010; ADSP-BF527 Blackfin Processor Anomaly List
16 */ 16 */
17 17
18#ifndef _MACH_ANOMALY_H_ 18#ifndef _MACH_ANOMALY_H_
@@ -220,6 +220,8 @@
220#define ANOMALY_05000483 (1) 220#define ANOMALY_05000483 (1)
221/* PLL_CTL Change Using bfrom_SysControl() Can Result in Processor Overclocking */ 221/* PLL_CTL Change Using bfrom_SysControl() Can Result in Processor Overclocking */
222#define ANOMALY_05000485 (_ANOMALY_BF526_BF527(< 2, < 3)) 222#define ANOMALY_05000485 (_ANOMALY_BF526_BF527(< 2, < 3))
223/* The CODEC Zero-Cross Detect Feature is not Functional */
224#define ANOMALY_05000487 (1)
223/* IFLUSH sucks at life */ 225/* IFLUSH sucks at life */
224#define ANOMALY_05000491 (1) 226#define ANOMALY_05000491 (1)
225 227
@@ -268,11 +270,13 @@
268#define ANOMALY_05000323 (0) 270#define ANOMALY_05000323 (0)
269#define ANOMALY_05000362 (1) 271#define ANOMALY_05000362 (1)
270#define ANOMALY_05000363 (0) 272#define ANOMALY_05000363 (0)
273#define ANOMALY_05000383 (0)
271#define ANOMALY_05000400 (0) 274#define ANOMALY_05000400 (0)
272#define ANOMALY_05000402 (0) 275#define ANOMALY_05000402 (0)
273#define ANOMALY_05000412 (0) 276#define ANOMALY_05000412 (0)
274#define ANOMALY_05000447 (0) 277#define ANOMALY_05000447 (0)
275#define ANOMALY_05000448 (0) 278#define ANOMALY_05000448 (0)
276#define ANOMALY_05000474 (0) 279#define ANOMALY_05000474 (0)
280#define ANOMALY_05000480 (0)
277 281
278#endif 282#endif
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF522.h b/arch/blackfin/mach-bf527/include/mach/cdefBF522.h
index 618dfcdfa91a..2c12e879aa4e 100644
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF522.h
+++ b/arch/blackfin/mach-bf527/include/mach/cdefBF522.h
@@ -1007,18 +1007,18 @@
1007#define bfin_write_PORTG_SLEW(val) bfin_write16(PORTG_SLEW, val) 1007#define bfin_write_PORTG_SLEW(val) bfin_write16(PORTG_SLEW, val)
1008#define bfin_read_PORTH_SLEW() bfin_read16(PORTH_SLEW) 1008#define bfin_read_PORTH_SLEW() bfin_read16(PORTH_SLEW)
1009#define bfin_write_PORTH_SLEW(val) bfin_write16(PORTH_SLEW, val) 1009#define bfin_write_PORTH_SLEW(val) bfin_write16(PORTH_SLEW, val)
1010#define bfin_read_PORTF_HYSTERISIS() bfin_read16(PORTF_HYSTERISIS) 1010#define bfin_read_PORTF_HYSTERESIS() bfin_read16(PORTF_HYSTERESIS)
1011#define bfin_write_PORTF_HYSTERISIS(val) bfin_write16(PORTF_HYSTERISIS, val) 1011#define bfin_write_PORTF_HYSTERESIS(val) bfin_write16(PORTF_HYSTERESIS, val)
1012#define bfin_read_PORTG_HYSTERISIS() bfin_read16(PORTG_HYSTERISIS) 1012#define bfin_read_PORTG_HYSTERESIS() bfin_read16(PORTG_HYSTERESIS)
1013#define bfin_write_PORTG_HYSTERISIS(val) bfin_write16(PORTG_HYSTERISIS, val) 1013#define bfin_write_PORTG_HYSTERESIS(val) bfin_write16(PORTG_HYSTERESIS, val)
1014#define bfin_read_PORTH_HYSTERISIS() bfin_read16(PORTH_HYSTERISIS) 1014#define bfin_read_PORTH_HYSTERESIS() bfin_read16(PORTH_HYSTERESIS)
1015#define bfin_write_PORTH_HYSTERISIS(val) bfin_write16(PORTH_HYSTERISIS, val) 1015#define bfin_write_PORTH_HYSTERESIS(val) bfin_write16(PORTH_HYSTERESIS, val)
1016#define bfin_read_MISCPORT_DRIVE() bfin_read16(MISCPORT_DRIVE) 1016#define bfin_read_MISCPORT_DRIVE() bfin_read16(MISCPORT_DRIVE)
1017#define bfin_write_MISCPORT_DRIVE(val) bfin_write16(MISCPORT_DRIVE, val) 1017#define bfin_write_MISCPORT_DRIVE(val) bfin_write16(MISCPORT_DRIVE, val)
1018#define bfin_read_MISCPORT_SLEW() bfin_read16(MISCPORT_SLEW) 1018#define bfin_read_MISCPORT_SLEW() bfin_read16(MISCPORT_SLEW)
1019#define bfin_write_MISCPORT_SLEW(val) bfin_write16(MISCPORT_SLEW, val) 1019#define bfin_write_MISCPORT_SLEW(val) bfin_write16(MISCPORT_SLEW, val)
1020#define bfin_read_MISCPORT_HYSTERISIS() bfin_read16(MISCPORT_HYSTERISIS) 1020#define bfin_read_MISCPORT_HYSTERESIS() bfin_read16(MISCPORT_HYSTERESIS)
1021#define bfin_write_MISCPORT_HYSTERISIS(val) bfin_write16(MISCPORT_HYSTERISIS, val) 1021#define bfin_write_MISCPORT_HYSTERESIS(val) bfin_write16(MISCPORT_HYSTERESIS, val)
1022 1022
1023/* HOST Port Registers */ 1023/* HOST Port Registers */
1024 1024
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF522.h b/arch/blackfin/mach-bf527/include/mach/defBF522.h
index 84ef11e52644..37d353a19722 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF522.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF522.h
@@ -562,12 +562,12 @@
562#define PORTF_SLEW 0xFFC03230 /* Port F slew control */ 562#define PORTF_SLEW 0xFFC03230 /* Port F slew control */
563#define PORTG_SLEW 0xFFC03234 /* Port G slew control */ 563#define PORTG_SLEW 0xFFC03234 /* Port G slew control */
564#define PORTH_SLEW 0xFFC03238 /* Port H slew control */ 564#define PORTH_SLEW 0xFFC03238 /* Port H slew control */
565#define PORTF_HYSTERISIS 0xFFC03240 /* Port F Schmitt trigger control */ 565#define PORTF_HYSTERESIS 0xFFC03240 /* Port F Schmitt trigger control */
566#define PORTG_HYSTERISIS 0xFFC03244 /* Port G Schmitt trigger control */ 566#define PORTG_HYSTERESIS 0xFFC03244 /* Port G Schmitt trigger control */
567#define PORTH_HYSTERISIS 0xFFC03248 /* Port H Schmitt trigger control */ 567#define PORTH_HYSTERESIS 0xFFC03248 /* Port H Schmitt trigger control */
568#define MISCPORT_DRIVE 0xFFC03280 /* Misc Port drive strength control */ 568#define MISCPORT_DRIVE 0xFFC03280 /* Misc Port drive strength control */
569#define MISCPORT_SLEW 0xFFC03284 /* Misc Port slew control */ 569#define MISCPORT_SLEW 0xFFC03284 /* Misc Port slew control */
570#define MISCPORT_HYSTERISIS 0xFFC03288 /* Misc Port Schmitt trigger control */ 570#define MISCPORT_HYSTERESIS 0xFFC03288 /* Misc Port Schmitt trigger control */
571 571
572 572
573/*********************************************************************************** 573/***********************************************************************************
diff --git a/arch/blackfin/mach-bf527/include/mach/irq.h b/arch/blackfin/mach-bf527/include/mach/irq.h
index 704d9253e41d..ed7310ff819b 100644
--- a/arch/blackfin/mach-bf527/include/mach/irq.h
+++ b/arch/blackfin/mach-bf527/include/mach/irq.h
@@ -7,38 +7,9 @@
7#ifndef _BF527_IRQ_H_ 7#ifndef _BF527_IRQ_H_
8#define _BF527_IRQ_H_ 8#define _BF527_IRQ_H_
9 9
10/* 10#include <mach-common/irq.h>
11 * Interrupt source definitions 11
12 Event Source Core Event Name 12#define NR_PERI_INTS (2 * 32)
13 Core Emulation **
14 Events (highest priority) EMU 0
15 Reset RST 1
16 NMI NMI 2
17 Exception EVX 3
18 Reserved -- 4
19 Hardware Error IVHW 5
20 Core Timer IVTMR 6 *
21
22 .....
23
24 Software Interrupt 1 IVG14 31
25 Software Interrupt 2 --
26 (lowest priority) IVG15 32 *
27*/
28
29#define NR_PERI_INTS (2 * 32)
30
31/* The ABSTRACT IRQ definitions */
32/** the first seven of the following are fixed, the rest you change if you need to **/
33#define IRQ_EMU 0 /* Emulation */
34#define IRQ_RST 1 /* reset */
35#define IRQ_NMI 2 /* Non Maskable */
36#define IRQ_EVX 3 /* Exception */
37#define IRQ_UNUSED 4 /* - unused interrupt */
38#define IRQ_HWERR 5 /* Hardware Error */
39#define IRQ_CORETMR 6 /* Core timer */
40
41#define BFIN_IRQ(x) ((x) + 7)
42 13
43#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */ 14#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */
44#define IRQ_DMA0_ERROR BFIN_IRQ(1) /* DMA Error 0 (generic) */ 15#define IRQ_DMA0_ERROR BFIN_IRQ(1) /* DMA Error 0 (generic) */
@@ -53,21 +24,21 @@
53#define IRQ_UART0_ERROR BFIN_IRQ(12) /* UART0 Status */ 24#define IRQ_UART0_ERROR BFIN_IRQ(12) /* UART0 Status */
54#define IRQ_UART1_ERROR BFIN_IRQ(13) /* UART1 Status */ 25#define IRQ_UART1_ERROR BFIN_IRQ(13) /* UART1 Status */
55#define IRQ_RTC BFIN_IRQ(14) /* RTC */ 26#define IRQ_RTC BFIN_IRQ(14) /* RTC */
56#define IRQ_PPI BFIN_IRQ(15) /* DMA Channel 0 (PPI/NAND) */ 27#define IRQ_PPI BFIN_IRQ(15) /* DMA Channel 0 (PPI/NAND) */
57#define IRQ_SPORT0_RX BFIN_IRQ(16) /* DMA 3 Channel (SPORT0 RX) */ 28#define IRQ_SPORT0_RX BFIN_IRQ(16) /* DMA 3 Channel (SPORT0 RX) */
58#define IRQ_SPORT0_TX BFIN_IRQ(17) /* DMA 4 Channel (SPORT0 TX) */ 29#define IRQ_SPORT0_TX BFIN_IRQ(17) /* DMA 4 Channel (SPORT0 TX) */
59#define IRQ_SPORT1_RX BFIN_IRQ(18) /* DMA 5 Channel (SPORT1 RX) */ 30#define IRQ_SPORT1_RX BFIN_IRQ(18) /* DMA 5 Channel (SPORT1 RX) */
60#define IRQ_SPORT1_TX BFIN_IRQ(19) /* DMA 6 Channel (SPORT1 TX) */ 31#define IRQ_SPORT1_TX BFIN_IRQ(19) /* DMA 6 Channel (SPORT1 TX) */
61#define IRQ_TWI BFIN_IRQ(20) /* TWI */ 32#define IRQ_TWI BFIN_IRQ(20) /* TWI */
62#define IRQ_SPI BFIN_IRQ(21) /* DMA 7 Channel (SPI) */ 33#define IRQ_SPI BFIN_IRQ(21) /* DMA 7 Channel (SPI) */
63#define IRQ_UART0_RX BFIN_IRQ(22) /* DMA8 Channel (UART0 RX) */ 34#define IRQ_UART0_RX BFIN_IRQ(22) /* DMA8 Channel (UART0 RX) */
64#define IRQ_UART0_TX BFIN_IRQ(23) /* DMA9 Channel (UART0 TX) */ 35#define IRQ_UART0_TX BFIN_IRQ(23) /* DMA9 Channel (UART0 TX) */
65#define IRQ_UART1_RX BFIN_IRQ(24) /* DMA10 Channel (UART1 RX) */ 36#define IRQ_UART1_RX BFIN_IRQ(24) /* DMA10 Channel (UART1 RX) */
66#define IRQ_UART1_TX BFIN_IRQ(25) /* DMA11 Channel (UART1 TX) */ 37#define IRQ_UART1_TX BFIN_IRQ(25) /* DMA11 Channel (UART1 TX) */
67#define IRQ_OPTSEC BFIN_IRQ(26) /* OTPSEC Interrupt */ 38#define IRQ_OPTSEC BFIN_IRQ(26) /* OTPSEC Interrupt */
68#define IRQ_CNT BFIN_IRQ(27) /* GP Counter */ 39#define IRQ_CNT BFIN_IRQ(27) /* GP Counter */
69#define IRQ_MAC_RX BFIN_IRQ(28) /* DMA1 Channel (MAC RX/HDMA) */ 40#define IRQ_MAC_RX BFIN_IRQ(28) /* DMA1 Channel (MAC RX/HDMA) */
70#define IRQ_PORTH_INTA BFIN_IRQ(29) /* Port H Interrupt A */ 41#define IRQ_PORTH_INTA BFIN_IRQ(29) /* Port H Interrupt A */
71#define IRQ_MAC_TX BFIN_IRQ(30) /* DMA2 Channel (MAC TX/NAND) */ 42#define IRQ_MAC_TX BFIN_IRQ(30) /* DMA2 Channel (MAC TX/NAND) */
72#define IRQ_NFC BFIN_IRQ(30) /* DMA2 Channel (MAC TX/NAND) */ 43#define IRQ_NFC BFIN_IRQ(30) /* DMA2 Channel (MAC TX/NAND) */
73#define IRQ_PORTH_INTB BFIN_IRQ(31) /* Port H Interrupt B */ 44#define IRQ_PORTH_INTB BFIN_IRQ(31) /* Port H Interrupt B */
@@ -96,119 +67,108 @@
96#define IRQ_USB_INT2 BFIN_IRQ(54) /* USB_INT2 Interrupt */ 67#define IRQ_USB_INT2 BFIN_IRQ(54) /* USB_INT2 Interrupt */
97#define IRQ_USB_DMA BFIN_IRQ(55) /* USB_DMAINT Interrupt */ 68#define IRQ_USB_DMA BFIN_IRQ(55) /* USB_DMAINT Interrupt */
98 69
99#define SYS_IRQS BFIN_IRQ(63) /* 70 */ 70#define SYS_IRQS BFIN_IRQ(63) /* 70 */
100 71
101#define IRQ_PF0 71 72#define IRQ_PF0 71
102#define IRQ_PF1 72 73#define IRQ_PF1 72
103#define IRQ_PF2 73 74#define IRQ_PF2 73
104#define IRQ_PF3 74 75#define IRQ_PF3 74
105#define IRQ_PF4 75 76#define IRQ_PF4 75
106#define IRQ_PF5 76 77#define IRQ_PF5 76
107#define IRQ_PF6 77 78#define IRQ_PF6 77
108#define IRQ_PF7 78 79#define IRQ_PF7 78
109#define IRQ_PF8 79 80#define IRQ_PF8 79
110#define IRQ_PF9 80 81#define IRQ_PF9 80
111#define IRQ_PF10 81 82#define IRQ_PF10 81
112#define IRQ_PF11 82 83#define IRQ_PF11 82
113#define IRQ_PF12 83 84#define IRQ_PF12 83
114#define IRQ_PF13 84 85#define IRQ_PF13 84
115#define IRQ_PF14 85 86#define IRQ_PF14 85
116#define IRQ_PF15 86 87#define IRQ_PF15 86
117 88
118#define IRQ_PG0 87 89#define IRQ_PG0 87
119#define IRQ_PG1 88 90#define IRQ_PG1 88
120#define IRQ_PG2 89 91#define IRQ_PG2 89
121#define IRQ_PG3 90 92#define IRQ_PG3 90
122#define IRQ_PG4 91 93#define IRQ_PG4 91
123#define IRQ_PG5 92 94#define IRQ_PG5 92
124#define IRQ_PG6 93 95#define IRQ_PG6 93
125#define IRQ_PG7 94 96#define IRQ_PG7 94
126#define IRQ_PG8 95 97#define IRQ_PG8 95
127#define IRQ_PG9 96 98#define IRQ_PG9 96
128#define IRQ_PG10 97 99#define IRQ_PG10 97
129#define IRQ_PG11 98 100#define IRQ_PG11 98
130#define IRQ_PG12 99 101#define IRQ_PG12 99
131#define IRQ_PG13 100 102#define IRQ_PG13 100
132#define IRQ_PG14 101 103#define IRQ_PG14 101
133#define IRQ_PG15 102 104#define IRQ_PG15 102
134 105
135#define IRQ_PH0 103 106#define IRQ_PH0 103
136#define IRQ_PH1 104 107#define IRQ_PH1 104
137#define IRQ_PH2 105 108#define IRQ_PH2 105
138#define IRQ_PH3 106 109#define IRQ_PH3 106
139#define IRQ_PH4 107 110#define IRQ_PH4 107
140#define IRQ_PH5 108 111#define IRQ_PH5 108
141#define IRQ_PH6 109 112#define IRQ_PH6 109
142#define IRQ_PH7 110 113#define IRQ_PH7 110
143#define IRQ_PH8 111 114#define IRQ_PH8 111
144#define IRQ_PH9 112 115#define IRQ_PH9 112
145#define IRQ_PH10 113 116#define IRQ_PH10 113
146#define IRQ_PH11 114 117#define IRQ_PH11 114
147#define IRQ_PH12 115 118#define IRQ_PH12 115
148#define IRQ_PH13 116 119#define IRQ_PH13 116
149#define IRQ_PH14 117 120#define IRQ_PH14 117
150#define IRQ_PH15 118 121#define IRQ_PH15 118
151 122
152#define GPIO_IRQ_BASE IRQ_PF0 123#define GPIO_IRQ_BASE IRQ_PF0
153 124
154#define IRQ_MAC_PHYINT 119 /* PHY_INT Interrupt */ 125#define IRQ_MAC_PHYINT 119 /* PHY_INT Interrupt */
155#define IRQ_MAC_MMCINT 120 /* MMC Counter Interrupt */ 126#define IRQ_MAC_MMCINT 120 /* MMC Counter Interrupt */
156#define IRQ_MAC_RXFSINT 121 /* RX Frame-Status Interrupt */ 127#define IRQ_MAC_RXFSINT 121 /* RX Frame-Status Interrupt */
157#define IRQ_MAC_TXFSINT 122 /* TX Frame-Status Interrupt */ 128#define IRQ_MAC_TXFSINT 122 /* TX Frame-Status Interrupt */
158#define IRQ_MAC_WAKEDET 123 /* Wake-Up Interrupt */ 129#define IRQ_MAC_WAKEDET 123 /* Wake-Up Interrupt */
159#define IRQ_MAC_RXDMAERR 124 /* RX DMA Direction Error Interrupt */ 130#define IRQ_MAC_RXDMAERR 124 /* RX DMA Direction Error Interrupt */
160#define IRQ_MAC_TXDMAERR 125 /* TX DMA Direction Error Interrupt */ 131#define IRQ_MAC_TXDMAERR 125 /* TX DMA Direction Error Interrupt */
161#define IRQ_MAC_STMDONE 126 /* Station Mgt. Transfer Done Interrupt */ 132#define IRQ_MAC_STMDONE 126 /* Station Mgt. Transfer Done Interrupt */
162 133
163#define NR_MACH_IRQS (IRQ_MAC_STMDONE + 1) 134#define NR_MACH_IRQS (IRQ_MAC_STMDONE + 1)
164#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
165
166#define IVG7 7
167#define IVG8 8
168#define IVG9 9
169#define IVG10 10
170#define IVG11 11
171#define IVG12 12
172#define IVG13 13
173#define IVG14 14
174#define IVG15 15
175 135
176/* IAR0 BIT FIELDS */ 136/* IAR0 BIT FIELDS */
177#define IRQ_PLL_WAKEUP_POS 0 137#define IRQ_PLL_WAKEUP_POS 0
178#define IRQ_DMA0_ERROR_POS 4 138#define IRQ_DMA0_ERROR_POS 4
179#define IRQ_DMAR0_BLK_POS 8 139#define IRQ_DMAR0_BLK_POS 8
180#define IRQ_DMAR1_BLK_POS 12 140#define IRQ_DMAR1_BLK_POS 12
181#define IRQ_DMAR0_OVR_POS 16 141#define IRQ_DMAR0_OVR_POS 16
182#define IRQ_DMAR1_OVR_POS 20 142#define IRQ_DMAR1_OVR_POS 20
183#define IRQ_PPI_ERROR_POS 24 143#define IRQ_PPI_ERROR_POS 24
184#define IRQ_MAC_ERROR_POS 28 144#define IRQ_MAC_ERROR_POS 28
185 145
186/* IAR1 BIT FIELDS */ 146/* IAR1 BIT FIELDS */
187#define IRQ_SPORT0_ERROR_POS 0 147#define IRQ_SPORT0_ERROR_POS 0
188#define IRQ_SPORT1_ERROR_POS 4 148#define IRQ_SPORT1_ERROR_POS 4
189#define IRQ_UART0_ERROR_POS 16 149#define IRQ_UART0_ERROR_POS 16
190#define IRQ_UART1_ERROR_POS 20 150#define IRQ_UART1_ERROR_POS 20
191#define IRQ_RTC_POS 24 151#define IRQ_RTC_POS 24
192#define IRQ_PPI_POS 28 152#define IRQ_PPI_POS 28
193 153
194/* IAR2 BIT FIELDS */ 154/* IAR2 BIT FIELDS */
195#define IRQ_SPORT0_RX_POS 0 155#define IRQ_SPORT0_RX_POS 0
196#define IRQ_SPORT0_TX_POS 4 156#define IRQ_SPORT0_TX_POS 4
197#define IRQ_SPORT1_RX_POS 8 157#define IRQ_SPORT1_RX_POS 8
198#define IRQ_SPORT1_TX_POS 12 158#define IRQ_SPORT1_TX_POS 12
199#define IRQ_TWI_POS 16 159#define IRQ_TWI_POS 16
200#define IRQ_SPI_POS 20 160#define IRQ_SPI_POS 20
201#define IRQ_UART0_RX_POS 24 161#define IRQ_UART0_RX_POS 24
202#define IRQ_UART0_TX_POS 28 162#define IRQ_UART0_TX_POS 28
203 163
204/* IAR3 BIT FIELDS */ 164/* IAR3 BIT FIELDS */
205#define IRQ_UART1_RX_POS 0 165#define IRQ_UART1_RX_POS 0
206#define IRQ_UART1_TX_POS 4 166#define IRQ_UART1_TX_POS 4
207#define IRQ_OPTSEC_POS 8 167#define IRQ_OPTSEC_POS 8
208#define IRQ_CNT_POS 12 168#define IRQ_CNT_POS 12
209#define IRQ_MAC_RX_POS 16 169#define IRQ_MAC_RX_POS 16
210#define IRQ_PORTH_INTA_POS 20 170#define IRQ_PORTH_INTA_POS 20
211#define IRQ_MAC_TX_POS 24 171#define IRQ_MAC_TX_POS 24
212#define IRQ_PORTH_INTB_POS 28 172#define IRQ_PORTH_INTB_POS 28
213 173
214/* IAR4 BIT FIELDS */ 174/* IAR4 BIT FIELDS */
@@ -224,21 +184,21 @@
224/* IAR5 BIT FIELDS */ 184/* IAR5 BIT FIELDS */
225#define IRQ_PORTG_INTA_POS 0 185#define IRQ_PORTG_INTA_POS 0
226#define IRQ_PORTG_INTB_POS 4 186#define IRQ_PORTG_INTB_POS 4
227#define IRQ_MEM_DMA0_POS 8 187#define IRQ_MEM_DMA0_POS 8
228#define IRQ_MEM_DMA1_POS 12 188#define IRQ_MEM_DMA1_POS 12
229#define IRQ_WATCH_POS 16 189#define IRQ_WATCH_POS 16
230#define IRQ_PORTF_INTA_POS 20 190#define IRQ_PORTF_INTA_POS 20
231#define IRQ_PORTF_INTB_POS 24 191#define IRQ_PORTF_INTB_POS 24
232#define IRQ_SPI_ERROR_POS 28 192#define IRQ_SPI_ERROR_POS 28
233 193
234/* IAR6 BIT FIELDS */ 194/* IAR6 BIT FIELDS */
235#define IRQ_NFC_ERROR_POS 0 195#define IRQ_NFC_ERROR_POS 0
236#define IRQ_HDMA_ERROR_POS 4 196#define IRQ_HDMA_ERROR_POS 4
237#define IRQ_HDMA_POS 8 197#define IRQ_HDMA_POS 8
238#define IRQ_USB_EINT_POS 12 198#define IRQ_USB_EINT_POS 12
239#define IRQ_USB_INT0_POS 16 199#define IRQ_USB_INT0_POS 16
240#define IRQ_USB_INT1_POS 20 200#define IRQ_USB_INT1_POS 20
241#define IRQ_USB_INT2_POS 24 201#define IRQ_USB_INT2_POS 24
242#define IRQ_USB_DMA_POS 28 202#define IRQ_USB_DMA_POS 28
243 203
244#endif /* _BF527_IRQ_H_ */ 204#endif
diff --git a/arch/blackfin/mach-bf533/include/mach/anomaly.h b/arch/blackfin/mach-bf533/include/mach/anomaly.h
index 78f872187918..72aa59440f82 100644
--- a/arch/blackfin/mach-bf533/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf533/include/mach/anomaly.h
@@ -5,13 +5,13 @@
5 * and can be replaced with that version at any time 5 * and can be replaced with that version at any time
6 * DO NOT EDIT THIS FILE 6 * DO NOT EDIT THIS FILE
7 * 7 *
8 * Copyright 2004-2010 Analog Devices Inc. 8 * Copyright 2004-2011 Analog Devices Inc.
9 * Licensed under the ADI BSD license. 9 * Licensed under the ADI BSD license.
10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd 10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
11 */ 11 */
12 12
13/* This file should be up to date with: 13/* This file should be up to date with:
14 * - Revision E, 09/18/2008; ADSP-BF531/BF532/BF533 Blackfin Processor Anomaly List 14 * - Revision F, 05/25/2010; ADSP-BF531/BF532/BF533 Blackfin Processor Anomaly List
15 */ 15 */
16 16
17#ifndef _MACH_ANOMALY_H_ 17#ifndef _MACH_ANOMALY_H_
@@ -206,6 +206,10 @@
206#define ANOMALY_05000443 (1) 206#define ANOMALY_05000443 (1)
207/* False Hardware Error when RETI Points to Invalid Memory */ 207/* False Hardware Error when RETI Points to Invalid Memory */
208#define ANOMALY_05000461 (1) 208#define ANOMALY_05000461 (1)
209/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
210#define ANOMALY_05000462 (1)
211/* Boot Failure When SDRAM Control Signals Toggle Coming Out Of Reset */
212#define ANOMALY_05000471 (1)
209/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */ 213/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
210#define ANOMALY_05000473 (1) 214#define ANOMALY_05000473 (1)
211/* Possible Lockup Condition whem Modifying PLL from External Memory */ 215/* Possible Lockup Condition whem Modifying PLL from External Memory */
@@ -351,12 +355,14 @@
351#define ANOMALY_05000362 (1) 355#define ANOMALY_05000362 (1)
352#define ANOMALY_05000364 (0) 356#define ANOMALY_05000364 (0)
353#define ANOMALY_05000380 (0) 357#define ANOMALY_05000380 (0)
358#define ANOMALY_05000383 (0)
354#define ANOMALY_05000386 (1) 359#define ANOMALY_05000386 (1)
355#define ANOMALY_05000389 (0) 360#define ANOMALY_05000389 (0)
356#define ANOMALY_05000412 (0) 361#define ANOMALY_05000412 (0)
357#define ANOMALY_05000430 (0) 362#define ANOMALY_05000430 (0)
358#define ANOMALY_05000432 (0) 363#define ANOMALY_05000432 (0)
359#define ANOMALY_05000435 (0) 364#define ANOMALY_05000435 (0)
365#define ANOMALY_05000440 (0)
360#define ANOMALY_05000447 (0) 366#define ANOMALY_05000447 (0)
361#define ANOMALY_05000448 (0) 367#define ANOMALY_05000448 (0)
362#define ANOMALY_05000456 (0) 368#define ANOMALY_05000456 (0)
@@ -364,6 +370,7 @@
364#define ANOMALY_05000465 (0) 370#define ANOMALY_05000465 (0)
365#define ANOMALY_05000467 (0) 371#define ANOMALY_05000467 (0)
366#define ANOMALY_05000474 (0) 372#define ANOMALY_05000474 (0)
373#define ANOMALY_05000480 (0)
367#define ANOMALY_05000485 (0) 374#define ANOMALY_05000485 (0)
368 375
369#endif 376#endif
diff --git a/arch/blackfin/mach-bf533/include/mach/irq.h b/arch/blackfin/mach-bf533/include/mach/irq.h
index 1f7e9765d954..709733754142 100644
--- a/arch/blackfin/mach-bf533/include/mach/irq.h
+++ b/arch/blackfin/mach-bf533/include/mach/irq.h
@@ -7,83 +7,36 @@
7#ifndef _BF533_IRQ_H_ 7#ifndef _BF533_IRQ_H_
8#define _BF533_IRQ_H_ 8#define _BF533_IRQ_H_
9 9
10/* 10#include <mach-common/irq.h>
11 * Interrupt source definitions
12 Event Source Core Event Name
13Core Emulation **
14 Events (highest priority) EMU 0
15 Reset RST 1
16 NMI NMI 2
17 Exception EVX 3
18 Reserved -- 4
19 Hardware Error IVHW 5
20 Core Timer IVTMR 6 *
21 PLL Wakeup Interrupt IVG7 7
22 DMA Error (generic) IVG7 8
23 PPI Error Interrupt IVG7 9
24 SPORT0 Error Interrupt IVG7 10
25 SPORT1 Error Interrupt IVG7 11
26 SPI Error Interrupt IVG7 12
27 UART Error Interrupt IVG7 13
28 RTC Interrupt IVG8 14
29 DMA0 Interrupt (PPI) IVG8 15
30 DMA1 (SPORT0 RX) IVG9 16
31 DMA2 (SPORT0 TX) IVG9 17
32 DMA3 (SPORT1 RX) IVG9 18
33 DMA4 (SPORT1 TX) IVG9 19
34 DMA5 (PPI) IVG10 20
35 DMA6 (UART RX) IVG10 21
36 DMA7 (UART TX) IVG10 22
37 Timer0 IVG11 23
38 Timer1 IVG11 24
39 Timer2 IVG11 25
40 PF Interrupt A IVG12 26
41 PF Interrupt B IVG12 27
42 DMA8/9 Interrupt IVG13 28
43 DMA10/11 Interrupt IVG13 29
44 Watchdog Timer IVG13 30
45 11
46 Softirq IVG14 31 12#define NR_PERI_INTS 24
47 System Call --
48 (lowest priority) IVG15 32 *
49 */
50#define SYS_IRQS 31
51#define NR_PERI_INTS 24
52 13
53/* The ABSTRACT IRQ definitions */ 14#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */
54/** the first seven of the following are fixed, the rest you change if you need to **/ 15#define IRQ_DMA_ERROR BFIN_IRQ(1) /* DMA Error (general) */
55#define IRQ_EMU 0 /*Emulation */ 16#define IRQ_PPI_ERROR BFIN_IRQ(2) /* PPI Error Interrupt */
56#define IRQ_RST 1 /*reset */ 17#define IRQ_SPORT0_ERROR BFIN_IRQ(3) /* SPORT0 Error Interrupt */
57#define IRQ_NMI 2 /*Non Maskable */ 18#define IRQ_SPORT1_ERROR BFIN_IRQ(4) /* SPORT1 Error Interrupt */
58#define IRQ_EVX 3 /*Exception */ 19#define IRQ_SPI_ERROR BFIN_IRQ(5) /* SPI Error Interrupt */
59#define IRQ_UNUSED 4 /*- unused interrupt*/ 20#define IRQ_UART0_ERROR BFIN_IRQ(6) /* UART Error Interrupt */
60#define IRQ_HWERR 5 /*Hardware Error */ 21#define IRQ_RTC BFIN_IRQ(7) /* RTC Interrupt */
61#define IRQ_CORETMR 6 /*Core timer */ 22#define IRQ_PPI BFIN_IRQ(8) /* DMA0 Interrupt (PPI) */
23#define IRQ_SPORT0_RX BFIN_IRQ(9) /* DMA1 Interrupt (SPORT0 RX) */
24#define IRQ_SPORT0_TX BFIN_IRQ(10) /* DMA2 Interrupt (SPORT0 TX) */
25#define IRQ_SPORT1_RX BFIN_IRQ(11) /* DMA3 Interrupt (SPORT1 RX) */
26#define IRQ_SPORT1_TX BFIN_IRQ(12) /* DMA4 Interrupt (SPORT1 TX) */
27#define IRQ_SPI BFIN_IRQ(13) /* DMA5 Interrupt (SPI) */
28#define IRQ_UART0_RX BFIN_IRQ(14) /* DMA6 Interrupt (UART RX) */
29#define IRQ_UART0_TX BFIN_IRQ(15) /* DMA7 Interrupt (UART TX) */
30#define IRQ_TIMER0 BFIN_IRQ(16) /* Timer 0 */
31#define IRQ_TIMER1 BFIN_IRQ(17) /* Timer 1 */
32#define IRQ_TIMER2 BFIN_IRQ(18) /* Timer 2 */
33#define IRQ_PROG_INTA BFIN_IRQ(19) /* Programmable Flags A (8) */
34#define IRQ_PROG_INTB BFIN_IRQ(20) /* Programmable Flags B (8) */
35#define IRQ_MEM_DMA0 BFIN_IRQ(21) /* DMA8/9 Interrupt (Memory DMA Stream 0) */
36#define IRQ_MEM_DMA1 BFIN_IRQ(22) /* DMA10/11 Interrupt (Memory DMA Stream 1) */
37#define IRQ_WATCH BFIN_IRQ(23) /* Watch Dog Timer */
62 38
63#define IRQ_PLL_WAKEUP 7 /*PLL Wakeup Interrupt */ 39#define SYS_IRQS 31
64#define IRQ_DMA_ERROR 8 /*DMA Error (general) */
65#define IRQ_PPI_ERROR 9 /*PPI Error Interrupt */
66#define IRQ_SPORT0_ERROR 10 /*SPORT0 Error Interrupt */
67#define IRQ_SPORT1_ERROR 11 /*SPORT1 Error Interrupt */
68#define IRQ_SPI_ERROR 12 /*SPI Error Interrupt */
69#define IRQ_UART0_ERROR 13 /*UART Error Interrupt */
70#define IRQ_RTC 14 /*RTC Interrupt */
71#define IRQ_PPI 15 /*DMA0 Interrupt (PPI) */
72#define IRQ_SPORT0_RX 16 /*DMA1 Interrupt (SPORT0 RX) */
73#define IRQ_SPORT0_TX 17 /*DMA2 Interrupt (SPORT0 TX) */
74#define IRQ_SPORT1_RX 18 /*DMA3 Interrupt (SPORT1 RX) */
75#define IRQ_SPORT1_TX 19 /*DMA4 Interrupt (SPORT1 TX) */
76#define IRQ_SPI 20 /*DMA5 Interrupt (SPI) */
77#define IRQ_UART0_RX 21 /*DMA6 Interrupt (UART RX) */
78#define IRQ_UART0_TX 22 /*DMA7 Interrupt (UART TX) */
79#define IRQ_TIMER0 23 /*Timer 0 */
80#define IRQ_TIMER1 24 /*Timer 1 */
81#define IRQ_TIMER2 25 /*Timer 2 */
82#define IRQ_PROG_INTA 26 /*Programmable Flags A (8) */
83#define IRQ_PROG_INTB 27 /*Programmable Flags B (8) */
84#define IRQ_MEM_DMA0 28 /*DMA8/9 Interrupt (Memory DMA Stream 0) */
85#define IRQ_MEM_DMA1 29 /*DMA10/11 Interrupt (Memory DMA Stream 1) */
86#define IRQ_WATCH 30 /*Watch Dog Timer */
87 40
88#define IRQ_PF0 33 41#define IRQ_PF0 33
89#define IRQ_PF1 34 42#define IRQ_PF1 34
@@ -105,46 +58,35 @@ Core Emulation **
105#define GPIO_IRQ_BASE IRQ_PF0 58#define GPIO_IRQ_BASE IRQ_PF0
106 59
107#define NR_MACH_IRQS (IRQ_PF15 + 1) 60#define NR_MACH_IRQS (IRQ_PF15 + 1)
108#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
109
110#define IVG7 7
111#define IVG8 8
112#define IVG9 9
113#define IVG10 10
114#define IVG11 11
115#define IVG12 12
116#define IVG13 13
117#define IVG14 14
118#define IVG15 15
119 61
120/* IAR0 BIT FIELDS*/ 62/* IAR0 BIT FIELDS */
121#define RTC_ERROR_POS 28 63#define RTC_ERROR_POS 28
122#define UART_ERROR_POS 24 64#define UART_ERROR_POS 24
123#define SPORT1_ERROR_POS 20 65#define SPORT1_ERROR_POS 20
124#define SPI_ERROR_POS 16 66#define SPI_ERROR_POS 16
125#define SPORT0_ERROR_POS 12 67#define SPORT0_ERROR_POS 12
126#define PPI_ERROR_POS 8 68#define PPI_ERROR_POS 8
127#define DMA_ERROR_POS 4 69#define DMA_ERROR_POS 4
128#define PLLWAKE_ERROR_POS 0 70#define PLLWAKE_ERROR_POS 0
129 71
130/* IAR1 BIT FIELDS*/ 72/* IAR1 BIT FIELDS */
131#define DMA7_UARTTX_POS 28 73#define DMA7_UARTTX_POS 28
132#define DMA6_UARTRX_POS 24 74#define DMA6_UARTRX_POS 24
133#define DMA5_SPI_POS 20 75#define DMA5_SPI_POS 20
134#define DMA4_SPORT1TX_POS 16 76#define DMA4_SPORT1TX_POS 16
135#define DMA3_SPORT1RX_POS 12 77#define DMA3_SPORT1RX_POS 12
136#define DMA2_SPORT0TX_POS 8 78#define DMA2_SPORT0TX_POS 8
137#define DMA1_SPORT0RX_POS 4 79#define DMA1_SPORT0RX_POS 4
138#define DMA0_PPI_POS 0 80#define DMA0_PPI_POS 0
139 81
140/* IAR2 BIT FIELDS*/ 82/* IAR2 BIT FIELDS */
141#define WDTIMER_POS 28 83#define WDTIMER_POS 28
142#define MEMDMA1_POS 24 84#define MEMDMA1_POS 24
143#define MEMDMA0_POS 20 85#define MEMDMA0_POS 20
144#define PFB_POS 16 86#define PFB_POS 16
145#define PFA_POS 12 87#define PFA_POS 12
146#define TIMER2_POS 8 88#define TIMER2_POS 8
147#define TIMER1_POS 4 89#define TIMER1_POS 4
148#define TIMER0_POS 0 90#define TIMER0_POS 0
149 91
150#endif /* _BF533_IRQ_H_ */ 92#endif
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index 3fa335405b31..e16dc4560048 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -35,6 +35,7 @@
35#include <asm/reboot.h> 35#include <asm/reboot.h>
36#include <asm/portmux.h> 36#include <asm/portmux.h>
37#include <asm/dpmc.h> 37#include <asm/dpmc.h>
38#include <asm/bfin_sport.h>
38#ifdef CONFIG_REGULATOR_FIXED_VOLTAGE 39#ifdef CONFIG_REGULATOR_FIXED_VOLTAGE
39#include <linux/regulator/fixed.h> 40#include <linux/regulator/fixed.h>
40#endif 41#endif
@@ -2585,27 +2586,103 @@ static struct platform_device bfin_dpmc = {
2585 }, 2586 },
2586}; 2587};
2587 2588
2588#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 2589#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
2590 defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) || \
2591 defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
2592
2593#define SPORT_REQ(x) \
2594 [x] = {P_SPORT##x##_TFS, P_SPORT##x##_DTPRI, P_SPORT##x##_TSCLK, \
2595 P_SPORT##x##_RFS, P_SPORT##x##_DRPRI, P_SPORT##x##_RSCLK, 0}
2596
2597static const u16 bfin_snd_pin[][7] = {
2598 SPORT_REQ(0),
2599 SPORT_REQ(1),
2600};
2601
2602static struct bfin_snd_platform_data bfin_snd_data[] = {
2603 {
2604 .pin_req = &bfin_snd_pin[0][0],
2605 },
2606 {
2607 .pin_req = &bfin_snd_pin[1][0],
2608 },
2609};
2610
2611#define BFIN_SND_RES(x) \
2612 [x] = { \
2613 { \
2614 .start = SPORT##x##_TCR1, \
2615 .end = SPORT##x##_TCR1, \
2616 .flags = IORESOURCE_MEM \
2617 }, \
2618 { \
2619 .start = CH_SPORT##x##_RX, \
2620 .end = CH_SPORT##x##_RX, \
2621 .flags = IORESOURCE_DMA, \
2622 }, \
2623 { \
2624 .start = CH_SPORT##x##_TX, \
2625 .end = CH_SPORT##x##_TX, \
2626 .flags = IORESOURCE_DMA, \
2627 }, \
2628 { \
2629 .start = IRQ_SPORT##x##_ERROR, \
2630 .end = IRQ_SPORT##x##_ERROR, \
2631 .flags = IORESOURCE_IRQ, \
2632 } \
2633 }
2634
2635static struct resource bfin_snd_resources[][4] = {
2636 BFIN_SND_RES(0),
2637 BFIN_SND_RES(1),
2638};
2639
2640static struct platform_device bfin_pcm = {
2641 .name = "bfin-pcm-audio",
2642 .id = -1,
2643};
2644#endif
2645
2646#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
2647static struct platform_device bfin_ad73311_codec_device = {
2648 .name = "ad73311",
2649 .id = -1,
2650};
2651#endif
2652
2653#if defined(CONFIG_SND_BF5XX_SOC_I2S) || defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE)
2589static struct platform_device bfin_i2s = { 2654static struct platform_device bfin_i2s = {
2590 .name = "bfin-i2s", 2655 .name = "bfin-i2s",
2591 .id = CONFIG_SND_BF5XX_SPORT_NUM, 2656 .id = CONFIG_SND_BF5XX_SPORT_NUM,
2592 /* TODO: add platform data here */ 2657 .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
2658 .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
2659 .dev = {
2660 .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
2661 },
2593}; 2662};
2594#endif 2663#endif
2595 2664
2596#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) 2665#if defined(CONFIG_SND_BF5XX_SOC_TDM) || defined(CONFIG_SND_BF5XX_SOC_TDM_MODULE)
2597static struct platform_device bfin_tdm = { 2666static struct platform_device bfin_tdm = {
2598 .name = "bfin-tdm", 2667 .name = "bfin-tdm",
2599 .id = CONFIG_SND_BF5XX_SPORT_NUM, 2668 .id = CONFIG_SND_BF5XX_SPORT_NUM,
2600 /* TODO: add platform data here */ 2669 .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
2670 .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
2671 .dev = {
2672 .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
2673 },
2601}; 2674};
2602#endif 2675#endif
2603 2676
2604#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) 2677#if defined(CONFIG_SND_BF5XX_SOC_AC97) || defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE)
2605static struct platform_device bfin_ac97 = { 2678static struct platform_device bfin_ac97 = {
2606 .name = "bfin-ac97", 2679 .name = "bfin-ac97",
2607 .id = CONFIG_SND_BF5XX_SPORT_NUM, 2680 .id = CONFIG_SND_BF5XX_SPORT_NUM,
2608 /* TODO: add platform data here */ 2681 .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
2682 .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
2683 .dev = {
2684 .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
2685 },
2609}; 2686};
2610#endif 2687#endif
2611 2688
@@ -2796,17 +2873,28 @@ static struct platform_device *stamp_devices[] __initdata = {
2796 &stamp_flash_device, 2873 &stamp_flash_device,
2797#endif 2874#endif
2798 2875
2799#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 2876#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
2877 defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) || \
2878 defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
2879 &bfin_pcm,
2880#endif
2881
2882#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
2883 &bfin_ad73311_codec_device,
2884#endif
2885
2886#if defined(CONFIG_SND_BF5XX_SOC_I2S) || defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE)
2800 &bfin_i2s, 2887 &bfin_i2s,
2801#endif 2888#endif
2802 2889
2803#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) 2890#if defined(CONFIG_SND_BF5XX_SOC_TDM) || defined(CONFIG_SND_BF5XX_SOC_TDM_MODULE)
2804 &bfin_tdm, 2891 &bfin_tdm,
2805#endif 2892#endif
2806 2893
2807#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) 2894#if defined(CONFIG_SND_BF5XX_SOC_AC97) || defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE)
2808 &bfin_ac97, 2895 &bfin_ac97,
2809#endif 2896#endif
2897
2810#if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE) 2898#if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE)
2811#if defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER) || \ 2899#if defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER) || \
2812 defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER_MODULE) 2900 defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER_MODULE)
diff --git a/arch/blackfin/mach-bf537/include/mach/anomaly.h b/arch/blackfin/mach-bf537/include/mach/anomaly.h
index 43df6afd22ad..7f8e5a9f5db6 100644
--- a/arch/blackfin/mach-bf537/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf537/include/mach/anomaly.h
@@ -5,13 +5,13 @@
5 * and can be replaced with that version at any time 5 * and can be replaced with that version at any time
6 * DO NOT EDIT THIS FILE 6 * DO NOT EDIT THIS FILE
7 * 7 *
8 * Copyright 2004-2010 Analog Devices Inc. 8 * Copyright 2004-2011 Analog Devices Inc.
9 * Licensed under the ADI BSD license. 9 * Licensed under the ADI BSD license.
10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd 10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
11 */ 11 */
12 12
13/* This file should be up to date with: 13/* This file should be up to date with:
14 * - Revision D, 09/18/2008; ADSP-BF534/ADSP-BF536/ADSP-BF537 Blackfin Processor Anomaly List 14 * - Revision E, 05/25/2010; ADSP-BF534/ADSP-BF536/ADSP-BF537 Blackfin Processor Anomaly List
15 */ 15 */
16 16
17#ifndef _MACH_ANOMALY_H_ 17#ifndef _MACH_ANOMALY_H_
@@ -160,12 +160,16 @@
160#define ANOMALY_05000443 (1) 160#define ANOMALY_05000443 (1)
161/* False Hardware Error when RETI Points to Invalid Memory */ 161/* False Hardware Error when RETI Points to Invalid Memory */
162#define ANOMALY_05000461 (1) 162#define ANOMALY_05000461 (1)
163/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
164#define ANOMALY_05000462 (1)
163/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */ 165/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
164#define ANOMALY_05000473 (1) 166#define ANOMALY_05000473 (1)
165/* Possible Lockup Condition whem Modifying PLL from External Memory */ 167/* Possible Lockup Condition whem Modifying PLL from External Memory */
166#define ANOMALY_05000475 (1) 168#define ANOMALY_05000475 (1)
167/* TESTSET Instruction Cannot Be Interrupted */ 169/* TESTSET Instruction Cannot Be Interrupted */
168#define ANOMALY_05000477 (1) 170#define ANOMALY_05000477 (1)
171/* Multiple Simultaneous Urgent DMA Requests May Cause DMA System Instability */
172#define ANOMALY_05000480 (__SILICON_REVISION__ < 3)
169/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */ 173/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
170#define ANOMALY_05000481 (1) 174#define ANOMALY_05000481 (1)
171/* IFLUSH sucks at life */ 175/* IFLUSH sucks at life */
@@ -204,6 +208,7 @@
204#define ANOMALY_05000363 (0) 208#define ANOMALY_05000363 (0)
205#define ANOMALY_05000364 (0) 209#define ANOMALY_05000364 (0)
206#define ANOMALY_05000380 (0) 210#define ANOMALY_05000380 (0)
211#define ANOMALY_05000383 (0)
207#define ANOMALY_05000386 (1) 212#define ANOMALY_05000386 (1)
208#define ANOMALY_05000389 (0) 213#define ANOMALY_05000389 (0)
209#define ANOMALY_05000400 (0) 214#define ANOMALY_05000400 (0)
@@ -211,6 +216,7 @@
211#define ANOMALY_05000430 (0) 216#define ANOMALY_05000430 (0)
212#define ANOMALY_05000432 (0) 217#define ANOMALY_05000432 (0)
213#define ANOMALY_05000435 (0) 218#define ANOMALY_05000435 (0)
219#define ANOMALY_05000440 (0)
214#define ANOMALY_05000447 (0) 220#define ANOMALY_05000447 (0)
215#define ANOMALY_05000448 (0) 221#define ANOMALY_05000448 (0)
216#define ANOMALY_05000456 (0) 222#define ANOMALY_05000456 (0)
diff --git a/arch/blackfin/mach-bf537/include/mach/irq.h b/arch/blackfin/mach-bf537/include/mach/irq.h
index 1a6d617c5fcf..b6ed8235bda4 100644
--- a/arch/blackfin/mach-bf537/include/mach/irq.h
+++ b/arch/blackfin/mach-bf537/include/mach/irq.h
@@ -7,193 +7,178 @@
7#ifndef _BF537_IRQ_H_ 7#ifndef _BF537_IRQ_H_
8#define _BF537_IRQ_H_ 8#define _BF537_IRQ_H_
9 9
10/* 10#include <mach-common/irq.h>
11 * Interrupt source definitions 11
12 * Event Source Core Event Name 12#define NR_PERI_INTS 32
13 * Core Emulation ** 13
14 * Events (highest priority) EMU 0 14#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */
15 * Reset RST 1 15#define IRQ_DMA_ERROR BFIN_IRQ(1) /* DMA Error (general) */
16 * NMI NMI 2 16#define IRQ_GENERIC_ERROR BFIN_IRQ(2) /* GENERIC Error Interrupt */
17 * Exception EVX 3 17#define IRQ_RTC BFIN_IRQ(3) /* RTC Interrupt */
18 * Reserved -- 4 18#define IRQ_PPI BFIN_IRQ(4) /* DMA0 Interrupt (PPI) */
19 * Hardware Error IVHW 5 19#define IRQ_SPORT0_RX BFIN_IRQ(5) /* DMA3 Interrupt (SPORT0 RX) */
20 * Core Timer IVTMR 6 20#define IRQ_SPORT0_TX BFIN_IRQ(6) /* DMA4 Interrupt (SPORT0 TX) */
21 * ..... 21#define IRQ_SPORT1_RX BFIN_IRQ(7) /* DMA5 Interrupt (SPORT1 RX) */
22 * 22#define IRQ_SPORT1_TX BFIN_IRQ(8) /* DMA6 Interrupt (SPORT1 TX) */
23 * Softirq IVG14 23#define IRQ_TWI BFIN_IRQ(9) /* TWI Interrupt */
24 * System Call -- 24#define IRQ_SPI BFIN_IRQ(10) /* DMA7 Interrupt (SPI) */
25 * (lowest priority) IVG15 25#define IRQ_UART0_RX BFIN_IRQ(11) /* DMA8 Interrupt (UART0 RX) */
26 */ 26#define IRQ_UART0_TX BFIN_IRQ(12) /* DMA9 Interrupt (UART0 TX) */
27 27#define IRQ_UART1_RX BFIN_IRQ(13) /* DMA10 Interrupt (UART1 RX) */
28#define SYS_IRQS 39 28#define IRQ_UART1_TX BFIN_IRQ(14) /* DMA11 Interrupt (UART1 TX) */
29#define NR_PERI_INTS 32 29#define IRQ_CAN_RX BFIN_IRQ(15) /* CAN Receive Interrupt */
30 30#define IRQ_CAN_TX BFIN_IRQ(16) /* CAN Transmit Interrupt */
31/* The ABSTRACT IRQ definitions */ 31#define IRQ_PH_INTA_MAC_RX BFIN_IRQ(17) /* Port H Interrupt A & DMA1 Interrupt (Ethernet RX) */
32/** the first seven of the following are fixed, the rest you change if you need to **/ 32#define IRQ_PH_INTB_MAC_TX BFIN_IRQ(18) /* Port H Interrupt B & DMA2 Interrupt (Ethernet TX) */
33#define IRQ_EMU 0 /*Emulation */ 33#define IRQ_TIMER0 BFIN_IRQ(19) /* Timer 0 */
34#define IRQ_RST 1 /*reset */ 34#define IRQ_TIMER1 BFIN_IRQ(20) /* Timer 1 */
35#define IRQ_NMI 2 /*Non Maskable */ 35#define IRQ_TIMER2 BFIN_IRQ(21) /* Timer 2 */
36#define IRQ_EVX 3 /*Exception */ 36#define IRQ_TIMER3 BFIN_IRQ(22) /* Timer 3 */
37#define IRQ_UNUSED 4 /*- unused interrupt*/ 37#define IRQ_TIMER4 BFIN_IRQ(23) /* Timer 4 */
38#define IRQ_HWERR 5 /*Hardware Error */ 38#define IRQ_TIMER5 BFIN_IRQ(24) /* Timer 5 */
39#define IRQ_CORETMR 6 /*Core timer */ 39#define IRQ_TIMER6 BFIN_IRQ(25) /* Timer 6 */
40 40#define IRQ_TIMER7 BFIN_IRQ(26) /* Timer 7 */
41#define IRQ_PLL_WAKEUP 7 /*PLL Wakeup Interrupt */ 41#define IRQ_PF_INTA_PG_INTA BFIN_IRQ(27) /* Ports F&G Interrupt A */
42#define IRQ_DMA_ERROR 8 /*DMA Error (general) */ 42#define IRQ_PORTG_INTB BFIN_IRQ(28) /* Port G Interrupt B */
43#define IRQ_GENERIC_ERROR 9 /*GENERIC Error Interrupt */ 43#define IRQ_MEM_DMA0 BFIN_IRQ(29) /* (Memory DMA Stream 0) */
44#define IRQ_RTC 10 /*RTC Interrupt */ 44#define IRQ_MEM_DMA1 BFIN_IRQ(30) /* (Memory DMA Stream 1) */
45#define IRQ_PPI 11 /*DMA0 Interrupt (PPI) */ 45#define IRQ_PF_INTB_WATCH BFIN_IRQ(31) /* Watchdog & Port F Interrupt B */
46#define IRQ_SPORT0_RX 12 /*DMA3 Interrupt (SPORT0 RX) */ 46
47#define IRQ_SPORT0_TX 13 /*DMA4 Interrupt (SPORT0 TX) */ 47#define SYS_IRQS 39
48#define IRQ_SPORT1_RX 14 /*DMA5 Interrupt (SPORT1 RX) */ 48
49#define IRQ_SPORT1_TX 15 /*DMA6 Interrupt (SPORT1 TX) */ 49#define IRQ_PPI_ERROR 42 /* PPI Error Interrupt */
50#define IRQ_TWI 16 /*TWI Interrupt */ 50#define IRQ_CAN_ERROR 43 /* CAN Error Interrupt */
51#define IRQ_SPI 17 /*DMA7 Interrupt (SPI) */ 51#define IRQ_MAC_ERROR 44 /* MAC Status/Error Interrupt */
52#define IRQ_UART0_RX 18 /*DMA8 Interrupt (UART0 RX) */ 52#define IRQ_SPORT0_ERROR 45 /* SPORT0 Error Interrupt */
53#define IRQ_UART0_TX 19 /*DMA9 Interrupt (UART0 TX) */ 53#define IRQ_SPORT1_ERROR 46 /* SPORT1 Error Interrupt */
54#define IRQ_UART1_RX 20 /*DMA10 Interrupt (UART1 RX) */ 54#define IRQ_SPI_ERROR 47 /* SPI Error Interrupt */
55#define IRQ_UART1_TX 21 /*DMA11 Interrupt (UART1 TX) */ 55#define IRQ_UART0_ERROR 48 /* UART Error Interrupt */
56#define IRQ_CAN_RX 22 /*CAN Receive Interrupt */ 56#define IRQ_UART1_ERROR 49 /* UART Error Interrupt */
57#define IRQ_CAN_TX 23 /*CAN Transmit Interrupt */ 57
58#define IRQ_MAC_RX 24 /*DMA1 (Ethernet RX) Interrupt */ 58#define IRQ_PF0 50
59#define IRQ_MAC_TX 25 /*DMA2 (Ethernet TX) Interrupt */ 59#define IRQ_PF1 51
60#define IRQ_TIMER0 26 /*Timer 0 */ 60#define IRQ_PF2 52
61#define IRQ_TIMER1 27 /*Timer 1 */ 61#define IRQ_PF3 53
62#define IRQ_TIMER2 28 /*Timer 2 */ 62#define IRQ_PF4 54
63#define IRQ_TIMER3 29 /*Timer 3 */ 63#define IRQ_PF5 55
64#define IRQ_TIMER4 30 /*Timer 4 */ 64#define IRQ_PF6 56
65#define IRQ_TIMER5 31 /*Timer 5 */ 65#define IRQ_PF7 57
66#define IRQ_TIMER6 32 /*Timer 6 */ 66#define IRQ_PF8 58
67#define IRQ_TIMER7 33 /*Timer 7 */ 67#define IRQ_PF9 59
68#define IRQ_PROG_INTA 34 /* PF Ports F&G (PF15:0) Interrupt A */ 68#define IRQ_PF10 60
69#define IRQ_PORTG_INTB 35 /* PF Port G (PF15:0) Interrupt B */ 69#define IRQ_PF11 61
70#define IRQ_MEM_DMA0 36 /*(Memory DMA Stream 0) */ 70#define IRQ_PF12 62
71#define IRQ_MEM_DMA1 37 /*(Memory DMA Stream 1) */ 71#define IRQ_PF13 63
72#define IRQ_PROG_INTB 38 /* PF Ports F (PF15:0) Interrupt B */ 72#define IRQ_PF14 64
73#define IRQ_WATCH 38 /*Watch Dog Timer */ 73#define IRQ_PF15 65
74 74
75#define IRQ_PPI_ERROR 42 /*PPI Error Interrupt */ 75#define IRQ_PG0 66
76#define IRQ_CAN_ERROR 43 /*CAN Error Interrupt */ 76#define IRQ_PG1 67
77#define IRQ_MAC_ERROR 44 /*MAC Status/Error Interrupt */ 77#define IRQ_PG2 68
78#define IRQ_SPORT0_ERROR 45 /*SPORT0 Error Interrupt */ 78#define IRQ_PG3 69
79#define IRQ_SPORT1_ERROR 46 /*SPORT1 Error Interrupt */ 79#define IRQ_PG4 70
80#define IRQ_SPI_ERROR 47 /*SPI Error Interrupt */ 80#define IRQ_PG5 71
81#define IRQ_UART0_ERROR 48 /*UART Error Interrupt */ 81#define IRQ_PG6 72
82#define IRQ_UART1_ERROR 49 /*UART Error Interrupt */ 82#define IRQ_PG7 73
83 83#define IRQ_PG8 74
84#define IRQ_PF0 50 84#define IRQ_PG9 75
85#define IRQ_PF1 51 85#define IRQ_PG10 76
86#define IRQ_PF2 52 86#define IRQ_PG11 77
87#define IRQ_PF3 53 87#define IRQ_PG12 78
88#define IRQ_PF4 54 88#define IRQ_PG13 79
89#define IRQ_PF5 55 89#define IRQ_PG14 80
90#define IRQ_PF6 56 90#define IRQ_PG15 81
91#define IRQ_PF7 57 91
92#define IRQ_PF8 58 92#define IRQ_PH0 82
93#define IRQ_PF9 59 93#define IRQ_PH1 83
94#define IRQ_PF10 60 94#define IRQ_PH2 84
95#define IRQ_PF11 61 95#define IRQ_PH3 85
96#define IRQ_PF12 62 96#define IRQ_PH4 86
97#define IRQ_PF13 63 97#define IRQ_PH5 87
98#define IRQ_PF14 64 98#define IRQ_PH6 88
99#define IRQ_PF15 65 99#define IRQ_PH7 89
100 100#define IRQ_PH8 90
101#define IRQ_PG0 66 101#define IRQ_PH9 91
102#define IRQ_PG1 67 102#define IRQ_PH10 92
103#define IRQ_PG2 68 103#define IRQ_PH11 93
104#define IRQ_PG3 69 104#define IRQ_PH12 94
105#define IRQ_PG4 70 105#define IRQ_PH13 95
106#define IRQ_PG5 71 106#define IRQ_PH14 96
107#define IRQ_PG6 72 107#define IRQ_PH15 97
108#define IRQ_PG7 73 108
109#define IRQ_PG8 74 109#define GPIO_IRQ_BASE IRQ_PF0
110#define IRQ_PG9 75 110
111#define IRQ_PG10 76 111#define IRQ_MAC_PHYINT 98 /* PHY_INT Interrupt */
112#define IRQ_PG11 77 112#define IRQ_MAC_MMCINT 99 /* MMC Counter Interrupt */
113#define IRQ_PG12 78 113#define IRQ_MAC_RXFSINT 100 /* RX Frame-Status Interrupt */
114#define IRQ_PG13 79 114#define IRQ_MAC_TXFSINT 101 /* TX Frame-Status Interrupt */
115#define IRQ_PG14 80 115#define IRQ_MAC_WAKEDET 102 /* Wake-Up Interrupt */
116#define IRQ_PG15 81 116#define IRQ_MAC_RXDMAERR 103 /* RX DMA Direction Error Interrupt */
117 117#define IRQ_MAC_TXDMAERR 104 /* TX DMA Direction Error Interrupt */
118#define IRQ_PH0 82 118#define IRQ_MAC_STMDONE 105 /* Station Mgt. Transfer Done Interrupt */
119#define IRQ_PH1 83 119
120#define IRQ_PH2 84 120#define IRQ_MAC_RX 106 /* DMA1 Interrupt (Ethernet RX) */
121#define IRQ_PH3 85 121#define IRQ_PORTH_INTA 107 /* Port H Interrupt A */
122#define IRQ_PH4 86 122
123#define IRQ_PH5 87 123#if 0 /* No Interrupt B support (yet) */
124#define IRQ_PH6 88 124#define IRQ_MAC_TX 108 /* DMA2 Interrupt (Ethernet TX) */
125#define IRQ_PH7 89 125#define IRQ_PORTH_INTB 109 /* Port H Interrupt B */
126#define IRQ_PH8 90 126#else
127#define IRQ_PH9 91 127#define IRQ_MAC_TX IRQ_PH_INTB_MAC_TX
128#define IRQ_PH10 92 128#endif
129#define IRQ_PH11 93 129
130#define IRQ_PH12 94 130#define IRQ_PORTF_INTA 110 /* Port F Interrupt A */
131#define IRQ_PH13 95 131#define IRQ_PORTG_INTA 111 /* Port G Interrupt A */
132#define IRQ_PH14 96 132
133#define IRQ_PH15 97 133#if 0 /* No Interrupt B support (yet) */
134 134#define IRQ_WATCH 112 /* Watchdog Timer */
135#define GPIO_IRQ_BASE IRQ_PF0 135#define IRQ_PORTF_INTB 113 /* Port F Interrupt B */
136 136#else
137#define IRQ_MAC_PHYINT 98 /* PHY_INT Interrupt */ 137#define IRQ_WATCH IRQ_PF_INTB_WATCH
138#define IRQ_MAC_MMCINT 99 /* MMC Counter Interrupt */ 138#endif
139#define IRQ_MAC_RXFSINT 100 /* RX Frame-Status Interrupt */ 139
140#define IRQ_MAC_TXFSINT 101 /* TX Frame-Status Interrupt */ 140#define NR_MACH_IRQS (113 + 1)
141#define IRQ_MAC_WAKEDET 102 /* Wake-Up Interrupt */ 141
142#define IRQ_MAC_RXDMAERR 103 /* RX DMA Direction Error Interrupt */ 142/* IAR0 BIT FIELDS */
143#define IRQ_MAC_TXDMAERR 104 /* TX DMA Direction Error Interrupt */ 143#define IRQ_PLL_WAKEUP_POS 0
144#define IRQ_MAC_STMDONE 105 /* Station Mgt. Transfer Done Interrupt */ 144#define IRQ_DMA_ERROR_POS 4
145 145#define IRQ_ERROR_POS 8
146#define NR_MACH_IRQS (IRQ_MAC_STMDONE + 1) 146#define IRQ_RTC_POS 12
147#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS) 147#define IRQ_PPI_POS 16
148 148#define IRQ_SPORT0_RX_POS 20
149#define IVG7 7 149#define IRQ_SPORT0_TX_POS 24
150#define IVG8 8 150#define IRQ_SPORT1_RX_POS 28
151#define IVG9 9 151
152#define IVG10 10 152/* IAR1 BIT FIELDS */
153#define IVG11 11 153#define IRQ_SPORT1_TX_POS 0
154#define IVG12 12 154#define IRQ_TWI_POS 4
155#define IVG13 13 155#define IRQ_SPI_POS 8
156#define IVG14 14 156#define IRQ_UART0_RX_POS 12
157#define IVG15 15 157#define IRQ_UART0_TX_POS 16
158 158#define IRQ_UART1_RX_POS 20
159/* IAR0 BIT FIELDS*/ 159#define IRQ_UART1_TX_POS 24
160#define IRQ_PLL_WAKEUP_POS 0 160#define IRQ_CAN_RX_POS 28
161#define IRQ_DMA_ERROR_POS 4 161
162#define IRQ_ERROR_POS 8 162/* IAR2 BIT FIELDS */
163#define IRQ_RTC_POS 12 163#define IRQ_CAN_TX_POS 0
164#define IRQ_PPI_POS 16 164#define IRQ_MAC_RX_POS 4
165#define IRQ_SPORT0_RX_POS 20 165#define IRQ_MAC_TX_POS 8
166#define IRQ_SPORT0_TX_POS 24 166#define IRQ_TIMER0_POS 12
167#define IRQ_SPORT1_RX_POS 28 167#define IRQ_TIMER1_POS 16
168 168#define IRQ_TIMER2_POS 20
169/* IAR1 BIT FIELDS*/ 169#define IRQ_TIMER3_POS 24
170#define IRQ_SPORT1_TX_POS 0 170#define IRQ_TIMER4_POS 28
171#define IRQ_TWI_POS 4 171
172#define IRQ_SPI_POS 8 172/* IAR3 BIT FIELDS */
173#define IRQ_UART0_RX_POS 12 173#define IRQ_TIMER5_POS 0
174#define IRQ_UART0_TX_POS 16 174#define IRQ_TIMER6_POS 4
175#define IRQ_UART1_RX_POS 20 175#define IRQ_TIMER7_POS 8
176#define IRQ_UART1_TX_POS 24 176#define IRQ_PROG_INTA_POS 12
177#define IRQ_CAN_RX_POS 28 177#define IRQ_PORTG_INTB_POS 16
178 178#define IRQ_MEM_DMA0_POS 20
179/* IAR2 BIT FIELDS*/ 179#define IRQ_MEM_DMA1_POS 24
180#define IRQ_CAN_TX_POS 0 180#define IRQ_WATCH_POS 28
181#define IRQ_MAC_RX_POS 4 181
182#define IRQ_MAC_TX_POS 8 182#define init_mach_irq init_mach_irq
183#define IRQ_TIMER0_POS 12 183
184#define IRQ_TIMER1_POS 16 184#endif
185#define IRQ_TIMER2_POS 20
186#define IRQ_TIMER3_POS 24
187#define IRQ_TIMER4_POS 28
188
189/* IAR3 BIT FIELDS*/
190#define IRQ_TIMER5_POS 0
191#define IRQ_TIMER6_POS 4
192#define IRQ_TIMER7_POS 8
193#define IRQ_PROG_INTA_POS 12
194#define IRQ_PORTG_INTB_POS 16
195#define IRQ_MEM_DMA0_POS 20
196#define IRQ_MEM_DMA1_POS 24
197#define IRQ_WATCH_POS 28
198
199#endif /* _BF537_IRQ_H_ */
diff --git a/arch/blackfin/mach-bf537/ints-priority.c b/arch/blackfin/mach-bf537/ints-priority.c
index f6500622b35d..2137a209a22b 100644
--- a/arch/blackfin/mach-bf537/ints-priority.c
+++ b/arch/blackfin/mach-bf537/ints-priority.c
@@ -10,6 +10,13 @@
10#include <linux/irq.h> 10#include <linux/irq.h>
11#include <asm/blackfin.h> 11#include <asm/blackfin.h>
12 12
13#include <asm/irq_handler.h>
14#include <asm/bfin5xx_spi.h>
15#include <asm/bfin_sport.h>
16#include <asm/bfin_can.h>
17#include <asm/bfin_dma.h>
18#include <asm/dpmc.h>
19
13void __init program_IAR(void) 20void __init program_IAR(void)
14{ 21{
15 /* Program the IAR0 Register with the configured priority */ 22 /* Program the IAR0 Register with the configured priority */
@@ -51,3 +58,159 @@ void __init program_IAR(void)
51 58
52 SSYNC(); 59 SSYNC();
53} 60}
61
62#define SPI_ERR_MASK (BIT_STAT_TXCOL | BIT_STAT_RBSY | BIT_STAT_MODF | BIT_STAT_TXE) /* SPI_STAT */
63#define SPORT_ERR_MASK (ROVF | RUVF | TOVF | TUVF) /* SPORT_STAT */
64#define PPI_ERR_MASK (0xFFFF & ~FLD) /* PPI_STATUS */
65#define EMAC_ERR_MASK (PHYINT | MMCINT | RXFSINT | TXFSINT | WAKEDET | RXDMAERR | TXDMAERR | STMDONE) /* EMAC_SYSTAT */
66#define UART_ERR_MASK (0x6) /* UART_IIR */
67#define CAN_ERR_MASK (EWTIF | EWRIF | EPIF | BOIF | WUIF | UIAIF | AAIF | RMLIF | UCEIF | EXTIF | ADIF) /* CAN_GIF */
68
69static int error_int_mask;
70
71static void bf537_generic_error_mask_irq(struct irq_data *d)
72{
73 error_int_mask &= ~(1L << (d->irq - IRQ_PPI_ERROR));
74 if (!error_int_mask)
75 bfin_internal_mask_irq(IRQ_GENERIC_ERROR);
76}
77
78static void bf537_generic_error_unmask_irq(struct irq_data *d)
79{
80 bfin_internal_unmask_irq(IRQ_GENERIC_ERROR);
81 error_int_mask |= 1L << (d->irq - IRQ_PPI_ERROR);
82}
83
84static struct irq_chip bf537_generic_error_irqchip = {
85 .name = "ERROR",
86 .irq_ack = bfin_ack_noop,
87 .irq_mask_ack = bf537_generic_error_mask_irq,
88 .irq_mask = bf537_generic_error_mask_irq,
89 .irq_unmask = bf537_generic_error_unmask_irq,
90};
91
92static void bf537_demux_error_irq(unsigned int int_err_irq,
93 struct irq_desc *inta_desc)
94{
95 int irq = 0;
96
97#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
98 if (bfin_read_EMAC_SYSTAT() & EMAC_ERR_MASK)
99 irq = IRQ_MAC_ERROR;
100 else
101#endif
102 if (bfin_read_SPORT0_STAT() & SPORT_ERR_MASK)
103 irq = IRQ_SPORT0_ERROR;
104 else if (bfin_read_SPORT1_STAT() & SPORT_ERR_MASK)
105 irq = IRQ_SPORT1_ERROR;
106 else if (bfin_read_PPI_STATUS() & PPI_ERR_MASK)
107 irq = IRQ_PPI_ERROR;
108 else if (bfin_read_CAN_GIF() & CAN_ERR_MASK)
109 irq = IRQ_CAN_ERROR;
110 else if (bfin_read_SPI_STAT() & SPI_ERR_MASK)
111 irq = IRQ_SPI_ERROR;
112 else if ((bfin_read_UART0_IIR() & UART_ERR_MASK) == UART_ERR_MASK)
113 irq = IRQ_UART0_ERROR;
114 else if ((bfin_read_UART1_IIR() & UART_ERR_MASK) == UART_ERR_MASK)
115 irq = IRQ_UART1_ERROR;
116
117 if (irq) {
118 if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR)))
119 bfin_handle_irq(irq);
120 else {
121
122 switch (irq) {
123 case IRQ_PPI_ERROR:
124 bfin_write_PPI_STATUS(PPI_ERR_MASK);
125 break;
126#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
127 case IRQ_MAC_ERROR:
128 bfin_write_EMAC_SYSTAT(EMAC_ERR_MASK);
129 break;
130#endif
131 case IRQ_SPORT0_ERROR:
132 bfin_write_SPORT0_STAT(SPORT_ERR_MASK);
133 break;
134
135 case IRQ_SPORT1_ERROR:
136 bfin_write_SPORT1_STAT(SPORT_ERR_MASK);
137 break;
138
139 case IRQ_CAN_ERROR:
140 bfin_write_CAN_GIS(CAN_ERR_MASK);
141 break;
142
143 case IRQ_SPI_ERROR:
144 bfin_write_SPI_STAT(SPI_ERR_MASK);
145 break;
146
147 default:
148 break;
149 }
150
151 pr_debug("IRQ %d:"
152 " MASKED PERIPHERAL ERROR INTERRUPT ASSERTED\n",
153 irq);
154 }
155 } else
156 pr_err("%s: IRQ ?: PERIPHERAL ERROR INTERRUPT ASSERTED BUT NO SOURCE FOUND\n",
157 __func__);
158
159}
160
161#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
162static int mac_rx_int_mask;
163
164static void bf537_mac_rx_mask_irq(struct irq_data *d)
165{
166 mac_rx_int_mask &= ~(1L << (d->irq - IRQ_MAC_RX));
167 if (!mac_rx_int_mask)
168 bfin_internal_mask_irq(IRQ_PH_INTA_MAC_RX);
169}
170
171static void bf537_mac_rx_unmask_irq(struct irq_data *d)
172{
173 bfin_internal_unmask_irq(IRQ_PH_INTA_MAC_RX);
174 mac_rx_int_mask |= 1L << (d->irq - IRQ_MAC_RX);
175}
176
177static struct irq_chip bf537_mac_rx_irqchip = {
178 .name = "ERROR",
179 .irq_ack = bfin_ack_noop,
180 .irq_mask_ack = bf537_mac_rx_mask_irq,
181 .irq_mask = bf537_mac_rx_mask_irq,
182 .irq_unmask = bf537_mac_rx_unmask_irq,
183};
184
185static void bf537_demux_mac_rx_irq(unsigned int int_irq,
186 struct irq_desc *desc)
187{
188 if (bfin_read_DMA1_IRQ_STATUS() & (DMA_DONE | DMA_ERR))
189 bfin_handle_irq(IRQ_MAC_RX);
190 else
191 bfin_demux_gpio_irq(int_irq, desc);
192}
193#endif
194
195void __init init_mach_irq(void)
196{
197 int irq;
198
199#if defined(CONFIG_BF537) || defined(CONFIG_BF536)
200 /* Clear EMAC Interrupt Status bits so we can demux it later */
201 bfin_write_EMAC_SYSTAT(-1);
202#endif
203
204 irq_set_chained_handler(IRQ_GENERIC_ERROR, bf537_demux_error_irq);
205 for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++)
206 irq_set_chip_and_handler(irq, &bf537_generic_error_irqchip,
207 handle_level_irq);
208
209#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
210 irq_set_chained_handler(IRQ_PH_INTA_MAC_RX, bf537_demux_mac_rx_irq);
211 irq_set_chip_and_handler(IRQ_MAC_RX, &bf537_mac_rx_irqchip, handle_level_irq);
212 irq_set_chip_and_handler(IRQ_PORTH_INTA, &bf537_mac_rx_irqchip, handle_level_irq);
213
214 irq_set_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq);
215#endif
216}
diff --git a/arch/blackfin/mach-bf538/include/mach/anomaly.h b/arch/blackfin/mach-bf538/include/mach/anomaly.h
index 8774b481c78e..55e7d0712a94 100644
--- a/arch/blackfin/mach-bf538/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf538/include/mach/anomaly.h
@@ -5,14 +5,14 @@
5 * and can be replaced with that version at any time 5 * and can be replaced with that version at any time
6 * DO NOT EDIT THIS FILE 6 * DO NOT EDIT THIS FILE
7 * 7 *
8 * Copyright 2004-2010 Analog Devices Inc. 8 * Copyright 2004-2011 Analog Devices Inc.
9 * Licensed under the ADI BSD license. 9 * Licensed under the ADI BSD license.
10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd 10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
11 */ 11 */
12 12
13/* This file should be up to date with: 13/* This file should be up to date with:
14 * - Revision H, 07/10/2009; ADSP-BF538/BF538F Blackfin Processor Anomaly List 14 * - Revision I, 05/25/2010; ADSP-BF538/BF538F Blackfin Processor Anomaly List
15 * - Revision M, 07/10/2009; ADSP-BF539/BF539F Blackfin Processor Anomaly List 15 * - Revision N, 05/25/2010; ADSP-BF539/BF539F Blackfin Processor Anomaly List
16 */ 16 */
17 17
18#ifndef _MACH_ANOMALY_H_ 18#ifndef _MACH_ANOMALY_H_
@@ -179,6 +179,7 @@
179#define ANOMALY_05000363 (0) 179#define ANOMALY_05000363 (0)
180#define ANOMALY_05000364 (0) 180#define ANOMALY_05000364 (0)
181#define ANOMALY_05000380 (0) 181#define ANOMALY_05000380 (0)
182#define ANOMALY_05000383 (0)
182#define ANOMALY_05000386 (1) 183#define ANOMALY_05000386 (1)
183#define ANOMALY_05000389 (0) 184#define ANOMALY_05000389 (0)
184#define ANOMALY_05000400 (0) 185#define ANOMALY_05000400 (0)
@@ -186,6 +187,7 @@
186#define ANOMALY_05000430 (0) 187#define ANOMALY_05000430 (0)
187#define ANOMALY_05000432 (0) 188#define ANOMALY_05000432 (0)
188#define ANOMALY_05000435 (0) 189#define ANOMALY_05000435 (0)
190#define ANOMALY_05000440 (0)
189#define ANOMALY_05000447 (0) 191#define ANOMALY_05000447 (0)
190#define ANOMALY_05000448 (0) 192#define ANOMALY_05000448 (0)
191#define ANOMALY_05000456 (0) 193#define ANOMALY_05000456 (0)
@@ -193,6 +195,7 @@
193#define ANOMALY_05000465 (0) 195#define ANOMALY_05000465 (0)
194#define ANOMALY_05000467 (0) 196#define ANOMALY_05000467 (0)
195#define ANOMALY_05000474 (0) 197#define ANOMALY_05000474 (0)
198#define ANOMALY_05000480 (0)
196#define ANOMALY_05000485 (0) 199#define ANOMALY_05000485 (0)
197 200
198#endif 201#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/irq.h b/arch/blackfin/mach-bf538/include/mach/irq.h
index 7a479d224dc7..07ca069d37cd 100644
--- a/arch/blackfin/mach-bf538/include/mach/irq.h
+++ b/arch/blackfin/mach-bf538/include/mach/irq.h
@@ -7,38 +7,9 @@
7#ifndef _BF538_IRQ_H_ 7#ifndef _BF538_IRQ_H_
8#define _BF538_IRQ_H_ 8#define _BF538_IRQ_H_
9 9
10/* 10#include <mach-common/irq.h>
11 * Interrupt source definitions 11
12 Event Source Core Event Name 12#define NR_PERI_INTS (2 * 32)
13 Core Emulation **
14 Events (highest priority) EMU 0
15 Reset RST 1
16 NMI NMI 2
17 Exception EVX 3
18 Reserved -- 4
19 Hardware Error IVHW 5
20 Core Timer IVTMR 6 *
21
22 .....
23
24 Software Interrupt 1 IVG14 31
25 Software Interrupt 2 --
26 (lowest priority) IVG15 32 *
27*/
28
29#define NR_PERI_INTS (2 * 32)
30
31/* The ABSTRACT IRQ definitions */
32/** the first seven of the following are fixed, the rest you change if you need to **/
33#define IRQ_EMU 0 /* Emulation */
34#define IRQ_RST 1 /* reset */
35#define IRQ_NMI 2 /* Non Maskable */
36#define IRQ_EVX 3 /* Exception */
37#define IRQ_UNUSED 4 /* - unused interrupt */
38#define IRQ_HWERR 5 /* Hardware Error */
39#define IRQ_CORETMR 6 /* Core timer */
40
41#define BFIN_IRQ(x) ((x) + 7)
42 13
43#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */ 14#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */
44#define IRQ_DMA0_ERROR BFIN_IRQ(1) /* DMA Error 0 (generic) */ 15#define IRQ_DMA0_ERROR BFIN_IRQ(1) /* DMA Error 0 (generic) */
@@ -91,37 +62,26 @@
91 62
92#define SYS_IRQS BFIN_IRQ(63) /* 70 */ 63#define SYS_IRQS BFIN_IRQ(63) /* 70 */
93 64
94#define IRQ_PF0 71 65#define IRQ_PF0 71
95#define IRQ_PF1 72 66#define IRQ_PF1 72
96#define IRQ_PF2 73 67#define IRQ_PF2 73
97#define IRQ_PF3 74 68#define IRQ_PF3 74
98#define IRQ_PF4 75 69#define IRQ_PF4 75
99#define IRQ_PF5 76 70#define IRQ_PF5 76
100#define IRQ_PF6 77 71#define IRQ_PF6 77
101#define IRQ_PF7 78 72#define IRQ_PF7 78
102#define IRQ_PF8 79 73#define IRQ_PF8 79
103#define IRQ_PF9 80 74#define IRQ_PF9 80
104#define IRQ_PF10 81 75#define IRQ_PF10 81
105#define IRQ_PF11 82 76#define IRQ_PF11 82
106#define IRQ_PF12 83 77#define IRQ_PF12 83
107#define IRQ_PF13 84 78#define IRQ_PF13 84
108#define IRQ_PF14 85 79#define IRQ_PF14 85
109#define IRQ_PF15 86 80#define IRQ_PF15 86
110 81
111#define GPIO_IRQ_BASE IRQ_PF0 82#define GPIO_IRQ_BASE IRQ_PF0
112 83
113#define NR_MACH_IRQS (IRQ_PF15 + 1) 84#define NR_MACH_IRQS (IRQ_PF15 + 1)
114#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
115
116#define IVG7 7
117#define IVG8 8
118#define IVG9 9
119#define IVG10 10
120#define IVG11 11
121#define IVG12 12
122#define IVG13 13
123#define IVG14 14
124#define IVG15 15
125 85
126/* IAR0 BIT FIELDS */ 86/* IAR0 BIT FIELDS */
127#define IRQ_PLL_WAKEUP_POS 0 87#define IRQ_PLL_WAKEUP_POS 0
@@ -184,4 +144,5 @@
184#define IRQ_CAN_TX_POS 0 144#define IRQ_CAN_TX_POS 0
185#define IRQ_MEM1_DMA0_POS 4 145#define IRQ_MEM1_DMA0_POS 4
186#define IRQ_MEM1_DMA1_POS 8 146#define IRQ_MEM1_DMA1_POS 8
187#endif /* _BF538_IRQ_H_ */ 147
148#endif
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 93e19a54a880..311bf9970fe7 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -22,6 +22,7 @@
22#include <asm/gpio.h> 22#include <asm/gpio.h>
23#include <asm/nand.h> 23#include <asm/nand.h>
24#include <asm/dpmc.h> 24#include <asm/dpmc.h>
25#include <asm/bfin_sport.h>
25#include <asm/portmux.h> 26#include <asm/portmux.h>
26#include <asm/bfin_sdh.h> 27#include <asm/bfin_sdh.h>
27#include <mach/bf54x_keys.h> 28#include <mach/bf54x_keys.h>
@@ -956,7 +957,15 @@ static struct mtd_partition ezkit_partitions[] = {
956 .offset = MTDPART_OFS_APPEND, 957 .offset = MTDPART_OFS_APPEND,
957 }, { 958 }, {
958 .name = "file system(nor)", 959 .name = "file system(nor)",
959 .size = MTDPART_SIZ_FULL, 960 .size = 0x1000000 - 0x80000 - 0x400000 - 0x8000 * 4,
961 .offset = MTDPART_OFS_APPEND,
962 }, {
963 .name = "config(nor)",
964 .size = 0x8000 * 3,
965 .offset = MTDPART_OFS_APPEND,
966 }, {
967 .name = "u-boot env(nor)",
968 .size = 0x8000,
960 .offset = MTDPART_OFS_APPEND, 969 .offset = MTDPART_OFS_APPEND,
961 } 970 }
962}; 971};
@@ -1312,27 +1321,110 @@ static struct platform_device bfin_dpmc = {
1312 }, 1321 },
1313}; 1322};
1314 1323
1315#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 1324#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
1325 defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) || \
1326 defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
1327
1328#define SPORT_REQ(x) \
1329 [x] = {P_SPORT##x##_TFS, P_SPORT##x##_DTPRI, P_SPORT##x##_TSCLK, \
1330 P_SPORT##x##_RFS, P_SPORT##x##_DRPRI, P_SPORT##x##_RSCLK, 0}
1331
1332static const u16 bfin_snd_pin[][7] = {
1333 SPORT_REQ(0),
1334 SPORT_REQ(1),
1335};
1336
1337static struct bfin_snd_platform_data bfin_snd_data[] = {
1338 {
1339 .pin_req = &bfin_snd_pin[0][0],
1340 },
1341 {
1342 .pin_req = &bfin_snd_pin[1][0],
1343 },
1344};
1345
1346#define BFIN_SND_RES(x) \
1347 [x] = { \
1348 { \
1349 .start = SPORT##x##_TCR1, \
1350 .end = SPORT##x##_TCR1, \
1351 .flags = IORESOURCE_MEM \
1352 }, \
1353 { \
1354 .start = CH_SPORT##x##_RX, \
1355 .end = CH_SPORT##x##_RX, \
1356 .flags = IORESOURCE_DMA, \
1357 }, \
1358 { \
1359 .start = CH_SPORT##x##_TX, \
1360 .end = CH_SPORT##x##_TX, \
1361 .flags = IORESOURCE_DMA, \
1362 }, \
1363 { \
1364 .start = IRQ_SPORT##x##_ERROR, \
1365 .end = IRQ_SPORT##x##_ERROR, \
1366 .flags = IORESOURCE_IRQ, \
1367 } \
1368 }
1369
1370static struct resource bfin_snd_resources[][4] = {
1371 BFIN_SND_RES(0),
1372 BFIN_SND_RES(1),
1373};
1374
1375static struct platform_device bfin_pcm = {
1376 .name = "bfin-pcm-audio",
1377 .id = -1,
1378};
1379#endif
1380
1381#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
1382static struct platform_device bfin_ad73311_codec_device = {
1383 .name = "ad73311",
1384 .id = -1,
1385};
1386#endif
1387
1388#if defined(CONFIG_SND_BF5XX_SOC_AD1980) || defined(CONFIG_SND_BF5XX_SOC_AD1980_MODULE)
1389static struct platform_device bfin_ad1980_codec_device = {
1390 .name = "ad1980",
1391 .id = -1,
1392};
1393#endif
1394
1395#if defined(CONFIG_SND_BF5XX_SOC_I2S) || defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE)
1316static struct platform_device bfin_i2s = { 1396static struct platform_device bfin_i2s = {
1317 .name = "bfin-i2s", 1397 .name = "bfin-i2s",
1318 .id = CONFIG_SND_BF5XX_SPORT_NUM, 1398 .id = CONFIG_SND_BF5XX_SPORT_NUM,
1319 /* TODO: add platform data here */ 1399 .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
1400 .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
1401 .dev = {
1402 .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
1403 },
1320}; 1404};
1321#endif 1405#endif
1322 1406
1323#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) 1407#if defined(CONFIG_SND_BF5XX_SOC_TDM) || defined(CONFIG_SND_BF5XX_SOC_TDM_MODULE)
1324static struct platform_device bfin_tdm = { 1408static struct platform_device bfin_tdm = {
1325 .name = "bfin-tdm", 1409 .name = "bfin-tdm",
1326 .id = CONFIG_SND_BF5XX_SPORT_NUM, 1410 .id = CONFIG_SND_BF5XX_SPORT_NUM,
1327 /* TODO: add platform data here */ 1411 .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
1412 .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
1413 .dev = {
1414 .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
1415 },
1328}; 1416};
1329#endif 1417#endif
1330 1418
1331#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) 1419#if defined(CONFIG_SND_BF5XX_SOC_AC97) || defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE)
1332static struct platform_device bfin_ac97 = { 1420static struct platform_device bfin_ac97 = {
1333 .name = "bfin-ac97", 1421 .name = "bfin-ac97",
1334 .id = CONFIG_SND_BF5XX_SPORT_NUM, 1422 .id = CONFIG_SND_BF5XX_SPORT_NUM,
1335 /* TODO: add platform data here */ 1423 .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
1424 .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
1425 .dev = {
1426 .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
1427 },
1336}; 1428};
1337#endif 1429#endif
1338 1430
@@ -1450,6 +1542,16 @@ static struct platform_device *ezkit_devices[] __initdata = {
1450 &ezkit_flash_device, 1542 &ezkit_flash_device,
1451#endif 1543#endif
1452 1544
1545#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
1546 defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) || \
1547 defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
1548 &bfin_pcm,
1549#endif
1550
1551#if defined(CONFIG_SND_BF5XX_SOC_AD1980) || defined(CONFIG_SND_BF5XX_SOC_AD1980_MODULE)
1552 &bfin_ad1980_codec_device,
1553#endif
1554
1453#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) 1555#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
1454 &bfin_i2s, 1556 &bfin_i2s,
1455#endif 1557#endif
diff --git a/arch/blackfin/mach-bf548/include/mach/anomaly.h b/arch/blackfin/mach-bf548/include/mach/anomaly.h
index ffd0537295ac..9e70785bdde3 100644
--- a/arch/blackfin/mach-bf548/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf548/include/mach/anomaly.h
@@ -5,13 +5,13 @@
5 * and can be replaced with that version at any time 5 * and can be replaced with that version at any time
6 * DO NOT EDIT THIS FILE 6 * DO NOT EDIT THIS FILE
7 * 7 *
8 * Copyright 2004-2010 Analog Devices Inc. 8 * Copyright 2004-2011 Analog Devices Inc.
9 * Licensed under the ADI BSD license. 9 * Licensed under the ADI BSD license.
10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd 10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
11 */ 11 */
12 12
13/* This file should be up to date with: 13/* This file should be up to date with:
14 * - Revision I, 07/23/2009; ADSP-BF542/BF544/BF547/BF548/BF549 Blackfin Processor Anomaly List 14 * - Revision J, 06/03/2010; ADSP-BF542/BF544/BF547/BF548/BF549 Blackfin Processor Anomaly List
15 */ 15 */
16 16
17#ifndef _MACH_ANOMALY_H_ 17#ifndef _MACH_ANOMALY_H_
@@ -220,6 +220,8 @@
220#define ANOMALY_05000481 (1) 220#define ANOMALY_05000481 (1)
221/* Possible USB Data Corruption When Multiple Endpoints Are Accessed by the Core */ 221/* Possible USB Data Corruption When Multiple Endpoints Are Accessed by the Core */
222#define ANOMALY_05000483 (1) 222#define ANOMALY_05000483 (1)
223/* DDR Trim May Not Be Performed for Certain VLEV Values in OTP Page PBS00L */
224#define ANOMALY_05000484 (__SILICON_REVISION__ < 3)
223/* PLL_CTL Change Using bfrom_SysControl() Can Result in Processor Overclocking */ 225/* PLL_CTL Change Using bfrom_SysControl() Can Result in Processor Overclocking */
224#define ANOMALY_05000485 (__SILICON_REVISION__ >= 2) 226#define ANOMALY_05000485 (__SILICON_REVISION__ >= 2)
225/* IFLUSH sucks at life */ 227/* IFLUSH sucks at life */
@@ -274,6 +276,8 @@
274#define ANOMALY_05000412 (0) 276#define ANOMALY_05000412 (0)
275#define ANOMALY_05000432 (0) 277#define ANOMALY_05000432 (0)
276#define ANOMALY_05000435 (0) 278#define ANOMALY_05000435 (0)
279#define ANOMALY_05000440 (0)
277#define ANOMALY_05000475 (0) 280#define ANOMALY_05000475 (0)
281#define ANOMALY_05000480 (0)
278 282
279#endif 283#endif
diff --git a/arch/blackfin/mach-bf548/include/mach/irq.h b/arch/blackfin/mach-bf548/include/mach/irq.h
index 7f87787e7738..533b8095b540 100644
--- a/arch/blackfin/mach-bf548/include/mach/irq.h
+++ b/arch/blackfin/mach-bf548/include/mach/irq.h
@@ -7,38 +7,9 @@
7#ifndef _BF548_IRQ_H_ 7#ifndef _BF548_IRQ_H_
8#define _BF548_IRQ_H_ 8#define _BF548_IRQ_H_
9 9
10/* 10#include <mach-common/irq.h>
11 * Interrupt source definitions
12 Event Source Core Event Name
13Core Emulation **
14Events (highest priority) EMU 0
15 Reset RST 1
16 NMI NMI 2
17 Exception EVX 3
18 Reserved -- 4
19 Hardware Error IVHW 5
20 Core Timer IVTMR 6 *
21
22.....
23
24 Software Interrupt 1 IVG14 31
25 Software Interrupt 2 --
26 (lowest priority) IVG15 32 *
27 */
28
29#define NR_PERI_INTS (32 * 3)
30
31/* The ABSTRACT IRQ definitions */
32/** the first seven of the following are fixed, the rest you change if you need to **/
33#define IRQ_EMU 0 /* Emulation */
34#define IRQ_RST 1 /* reset */
35#define IRQ_NMI 2 /* Non Maskable */
36#define IRQ_EVX 3 /* Exception */
37#define IRQ_UNUSED 4 /* - unused interrupt*/
38#define IRQ_HWERR 5 /* Hardware Error */
39#define IRQ_CORETMR 6 /* Core timer */
40 11
41#define BFIN_IRQ(x) ((x) + 7) 12#define NR_PERI_INTS (3 * 32)
42 13
43#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */ 14#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */
44#define IRQ_DMAC0_ERROR BFIN_IRQ(1) /* DMAC0 Status Interrupt */ 15#define IRQ_DMAC0_ERROR BFIN_IRQ(1) /* DMAC0 Status Interrupt */
@@ -311,49 +282,37 @@ Events (highest priority) EMU 0
311#define IRQ_PJ14 BFIN_PJ_IRQ(14) /* N/A */ 282#define IRQ_PJ14 BFIN_PJ_IRQ(14) /* N/A */
312#define IRQ_PJ15 BFIN_PJ_IRQ(15) /* N/A */ 283#define IRQ_PJ15 BFIN_PJ_IRQ(15) /* N/A */
313 284
314#define GPIO_IRQ_BASE IRQ_PA0 285#define GPIO_IRQ_BASE IRQ_PA0
315 286
316#define NR_MACH_IRQS (IRQ_PJ15 + 1) 287#define NR_MACH_IRQS (IRQ_PJ15 + 1)
317#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
318 288
319/* For compatibility reasons with existing code */ 289/* For compatibility reasons with existing code */
320 290
321#define IRQ_DMAC0_ERR IRQ_DMAC0_ERROR 291#define IRQ_DMAC0_ERR IRQ_DMAC0_ERROR
322#define IRQ_EPPI0_ERR IRQ_EPPI0_ERROR 292#define IRQ_EPPI0_ERR IRQ_EPPI0_ERROR
323#define IRQ_SPORT0_ERR IRQ_SPORT0_ERROR 293#define IRQ_SPORT0_ERR IRQ_SPORT0_ERROR
324#define IRQ_SPORT1_ERR IRQ_SPORT1_ERROR 294#define IRQ_SPORT1_ERR IRQ_SPORT1_ERROR
325#define IRQ_SPI0_ERR IRQ_SPI0_ERROR 295#define IRQ_SPI0_ERR IRQ_SPI0_ERROR
326#define IRQ_UART0_ERR IRQ_UART0_ERROR 296#define IRQ_UART0_ERR IRQ_UART0_ERROR
327#define IRQ_DMAC1_ERR IRQ_DMAC1_ERROR 297#define IRQ_DMAC1_ERR IRQ_DMAC1_ERROR
328#define IRQ_SPORT2_ERR IRQ_SPORT2_ERROR 298#define IRQ_SPORT2_ERR IRQ_SPORT2_ERROR
329#define IRQ_SPORT3_ERR IRQ_SPORT3_ERROR 299#define IRQ_SPORT3_ERR IRQ_SPORT3_ERROR
330#define IRQ_SPI1_ERR IRQ_SPI1_ERROR 300#define IRQ_SPI1_ERR IRQ_SPI1_ERROR
331#define IRQ_SPI2_ERR IRQ_SPI2_ERROR 301#define IRQ_SPI2_ERR IRQ_SPI2_ERROR
332#define IRQ_UART1_ERR IRQ_UART1_ERROR 302#define IRQ_UART1_ERR IRQ_UART1_ERROR
333#define IRQ_UART2_ERR IRQ_UART2_ERROR 303#define IRQ_UART2_ERR IRQ_UART2_ERROR
334#define IRQ_CAN0_ERR IRQ_CAN0_ERROR 304#define IRQ_CAN0_ERR IRQ_CAN0_ERROR
335#define IRQ_MXVR_ERR IRQ_MXVR_ERROR 305#define IRQ_MXVR_ERR IRQ_MXVR_ERROR
336#define IRQ_EPPI1_ERR IRQ_EPPI1_ERROR 306#define IRQ_EPPI1_ERR IRQ_EPPI1_ERROR
337#define IRQ_EPPI2_ERR IRQ_EPPI2_ERROR 307#define IRQ_EPPI2_ERR IRQ_EPPI2_ERROR
338#define IRQ_UART3_ERR IRQ_UART3_ERROR 308#define IRQ_UART3_ERR IRQ_UART3_ERROR
339#define IRQ_HOST_ERR IRQ_HOST_ERROR 309#define IRQ_HOST_ERR IRQ_HOST_ERROR
340#define IRQ_PIXC_ERR IRQ_PIXC_ERROR 310#define IRQ_PIXC_ERR IRQ_PIXC_ERROR
341#define IRQ_NFC_ERR IRQ_NFC_ERROR 311#define IRQ_NFC_ERR IRQ_NFC_ERROR
342#define IRQ_ATAPI_ERR IRQ_ATAPI_ERROR 312#define IRQ_ATAPI_ERR IRQ_ATAPI_ERROR
343#define IRQ_CAN1_ERR IRQ_CAN1_ERROR 313#define IRQ_CAN1_ERR IRQ_CAN1_ERROR
344#define IRQ_HS_DMA_ERR IRQ_HS_DMA_ERROR 314#define IRQ_HS_DMA_ERR IRQ_HS_DMA_ERROR
345 315
346
347#define IVG7 7
348#define IVG8 8
349#define IVG9 9
350#define IVG10 10
351#define IVG11 11
352#define IVG12 12
353#define IVG13 13
354#define IVG14 14
355#define IVG15 15
356
357/* IAR0 BIT FIELDS */ 316/* IAR0 BIT FIELDS */
358#define IRQ_PLL_WAKEUP_POS 0 317#define IRQ_PLL_WAKEUP_POS 0
359#define IRQ_DMAC0_ERR_POS 4 318#define IRQ_DMAC0_ERR_POS 4
@@ -492,4 +451,4 @@ struct bfin_pint_regs {
492 451
493#endif 452#endif
494 453
495#endif /* _BF548_IRQ_H_ */ 454#endif
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index f667e7704197..5067984a62e7 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -247,7 +247,15 @@ static struct mtd_partition ezkit_partitions[] = {
247 .offset = MTDPART_OFS_APPEND, 247 .offset = MTDPART_OFS_APPEND,
248 }, { 248 }, {
249 .name = "file system(nor)", 249 .name = "file system(nor)",
250 .size = MTDPART_SIZ_FULL, 250 .size = 0x800000 - 0x40000 - 0x1C0000 - 0x2000 * 8,
251 .offset = MTDPART_OFS_APPEND,
252 }, {
253 .name = "config(nor)",
254 .size = 0x2000 * 7,
255 .offset = MTDPART_OFS_APPEND,
256 }, {
257 .name = "u-boot env(nor)",
258 .size = 0x2000,
251 .offset = MTDPART_OFS_APPEND, 259 .offset = MTDPART_OFS_APPEND,
252 } 260 }
253}; 261};
diff --git a/arch/blackfin/mach-bf561/include/mach/anomaly.h b/arch/blackfin/mach-bf561/include/mach/anomaly.h
index 6a3499b02097..22b5ab773027 100644
--- a/arch/blackfin/mach-bf561/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf561/include/mach/anomaly.h
@@ -5,13 +5,13 @@
5 * and can be replaced with that version at any time 5 * and can be replaced with that version at any time
6 * DO NOT EDIT THIS FILE 6 * DO NOT EDIT THIS FILE
7 * 7 *
8 * Copyright 2004-2010 Analog Devices Inc. 8 * Copyright 2004-2011 Analog Devices Inc.
9 * Licensed under the ADI BSD license. 9 * Licensed under the ADI BSD license.
10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd 10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
11 */ 11 */
12 12
13/* This file should be up to date with: 13/* This file should be up to date with:
14 * - Revision Q, 11/07/2008; ADSP-BF561 Blackfin Processor Anomaly List 14 * - Revision R, 05/25/2010; ADSP-BF561 Blackfin Processor Anomaly List
15 */ 15 */
16 16
17#ifndef _MACH_ANOMALY_H_ 17#ifndef _MACH_ANOMALY_H_
@@ -290,12 +290,18 @@
290#define ANOMALY_05000428 (__SILICON_REVISION__ > 3) 290#define ANOMALY_05000428 (__SILICON_REVISION__ > 3)
291/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */ 291/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
292#define ANOMALY_05000443 (1) 292#define ANOMALY_05000443 (1)
293/* SCKELOW Feature Is Not Functional */
294#define ANOMALY_05000458 (1)
293/* False Hardware Error when RETI Points to Invalid Memory */ 295/* False Hardware Error when RETI Points to Invalid Memory */
294#define ANOMALY_05000461 (1) 296#define ANOMALY_05000461 (1)
297/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
298#define ANOMALY_05000462 (1)
299/* Boot Failure When SDRAM Control Signals Toggle Coming Out Of Reset */
300#define ANOMALY_05000471 (1)
295/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */ 301/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
296#define ANOMALY_05000473 (1) 302#define ANOMALY_05000473 (1)
297/* Possible Lockup Condition whem Modifying PLL from External Memory */ 303/* Possible Lockup Condition whem Modifying PLL from External Memory */
298#define ANOMALY_05000475 (__SILICON_REVISION__ < 4) 304#define ANOMALY_05000475 (1)
299/* TESTSET Instruction Cannot Be Interrupted */ 305/* TESTSET Instruction Cannot Be Interrupted */
300#define ANOMALY_05000477 (1) 306#define ANOMALY_05000477 (1)
301/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */ 307/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
@@ -314,12 +320,14 @@
314#define ANOMALY_05000353 (1) 320#define ANOMALY_05000353 (1)
315#define ANOMALY_05000364 (0) 321#define ANOMALY_05000364 (0)
316#define ANOMALY_05000380 (0) 322#define ANOMALY_05000380 (0)
323#define ANOMALY_05000383 (0)
317#define ANOMALY_05000386 (1) 324#define ANOMALY_05000386 (1)
318#define ANOMALY_05000389 (0) 325#define ANOMALY_05000389 (0)
319#define ANOMALY_05000400 (0) 326#define ANOMALY_05000400 (0)
320#define ANOMALY_05000430 (0) 327#define ANOMALY_05000430 (0)
321#define ANOMALY_05000432 (0) 328#define ANOMALY_05000432 (0)
322#define ANOMALY_05000435 (0) 329#define ANOMALY_05000435 (0)
330#define ANOMALY_05000440 (0)
323#define ANOMALY_05000447 (0) 331#define ANOMALY_05000447 (0)
324#define ANOMALY_05000448 (0) 332#define ANOMALY_05000448 (0)
325#define ANOMALY_05000456 (0) 333#define ANOMALY_05000456 (0)
@@ -327,6 +335,7 @@
327#define ANOMALY_05000465 (0) 335#define ANOMALY_05000465 (0)
328#define ANOMALY_05000467 (0) 336#define ANOMALY_05000467 (0)
329#define ANOMALY_05000474 (0) 337#define ANOMALY_05000474 (0)
338#define ANOMALY_05000480 (0)
330#define ANOMALY_05000485 (0) 339#define ANOMALY_05000485 (0)
331 340
332#endif 341#endif
diff --git a/arch/blackfin/mach-bf561/include/mach/irq.h b/arch/blackfin/mach-bf561/include/mach/irq.h
index c95566ade51b..d6998520f70f 100644
--- a/arch/blackfin/mach-bf561/include/mach/irq.h
+++ b/arch/blackfin/mach-bf561/include/mach/irq.h
@@ -7,212 +7,98 @@
7#ifndef _BF561_IRQ_H_ 7#ifndef _BF561_IRQ_H_
8#define _BF561_IRQ_H_ 8#define _BF561_IRQ_H_
9 9
10/*********************************************************************** 10#include <mach-common/irq.h>
11 * Interrupt source definitions: 11
12 Event Source Core Event Name IRQ No 12#define NR_PERI_INTS (2 * 32)
13 (highest priority) 13
14 Emulation Events EMU 0 14#define IRQ_PLL_WAKEUP BFIN_IRQ(0) /* PLL Wakeup Interrupt */
15 Reset RST 1 15#define IRQ_DMA1_ERROR BFIN_IRQ(1) /* DMA1 Error (general) */
16 NMI NMI 2 16#define IRQ_DMA_ERROR IRQ_DMA1_ERROR /* DMA1 Error (general) */
17 Exception EVX 3 17#define IRQ_DMA2_ERROR BFIN_IRQ(2) /* DMA2 Error (general) */
18 Reserved -- 4 18#define IRQ_IMDMA_ERROR BFIN_IRQ(3) /* IMDMA Error Interrupt */
19 Hardware Error IVHW 5 19#define IRQ_PPI1_ERROR BFIN_IRQ(4) /* PPI1 Error Interrupt */
20 Core Timer IVTMR 6 * 20#define IRQ_PPI_ERROR IRQ_PPI1_ERROR /* PPI1 Error Interrupt */
21 21#define IRQ_PPI2_ERROR BFIN_IRQ(5) /* PPI2 Error Interrupt */
22 PLL Wakeup Interrupt IVG7 7 22#define IRQ_SPORT0_ERROR BFIN_IRQ(6) /* SPORT0 Error Interrupt */
23 DMA1 Error (generic) IVG7 8 23#define IRQ_SPORT1_ERROR BFIN_IRQ(7) /* SPORT1 Error Interrupt */
24 DMA2 Error (generic) IVG7 9 24#define IRQ_SPI_ERROR BFIN_IRQ(8) /* SPI Error Interrupt */
25 IMDMA Error (generic) IVG7 10 25#define IRQ_UART_ERROR BFIN_IRQ(9) /* UART Error Interrupt */
26 PPI1 Error Interrupt IVG7 11 26#define IRQ_RESERVED_ERROR BFIN_IRQ(10) /* Reversed */
27 PPI2 Error Interrupt IVG7 12 27#define IRQ_DMA1_0 BFIN_IRQ(11) /* DMA1 0 Interrupt(PPI1) */
28 SPORT0 Error Interrupt IVG7 13 28#define IRQ_PPI IRQ_DMA1_0 /* DMA1 0 Interrupt(PPI1) */
29 SPORT1 Error Interrupt IVG7 14 29#define IRQ_PPI0 IRQ_DMA1_0 /* DMA1 0 Interrupt(PPI1) */
30 SPI Error Interrupt IVG7 15 30#define IRQ_DMA1_1 BFIN_IRQ(12) /* DMA1 1 Interrupt(PPI2) */
31 UART Error Interrupt IVG7 16 31#define IRQ_PPI1 IRQ_DMA1_1 /* DMA1 1 Interrupt(PPI2) */
32 Reserved Interrupt IVG7 17 32#define IRQ_DMA1_2 BFIN_IRQ(13) /* DMA1 2 Interrupt */
33 33#define IRQ_DMA1_3 BFIN_IRQ(14) /* DMA1 3 Interrupt */
34 DMA1 0 Interrupt(PPI1) IVG8 18 34#define IRQ_DMA1_4 BFIN_IRQ(15) /* DMA1 4 Interrupt */
35 DMA1 1 Interrupt(PPI2) IVG8 19 35#define IRQ_DMA1_5 BFIN_IRQ(16) /* DMA1 5 Interrupt */
36 DMA1 2 Interrupt IVG8 20 36#define IRQ_DMA1_6 BFIN_IRQ(17) /* DMA1 6 Interrupt */
37 DMA1 3 Interrupt IVG8 21 37#define IRQ_DMA1_7 BFIN_IRQ(18) /* DMA1 7 Interrupt */
38 DMA1 4 Interrupt IVG8 22 38#define IRQ_DMA1_8 BFIN_IRQ(19) /* DMA1 8 Interrupt */
39 DMA1 5 Interrupt IVG8 23 39#define IRQ_DMA1_9 BFIN_IRQ(20) /* DMA1 9 Interrupt */
40 DMA1 6 Interrupt IVG8 24 40#define IRQ_DMA1_10 BFIN_IRQ(21) /* DMA1 10 Interrupt */
41 DMA1 7 Interrupt IVG8 25 41#define IRQ_DMA1_11 BFIN_IRQ(22) /* DMA1 11 Interrupt */
42 DMA1 8 Interrupt IVG8 26 42#define IRQ_DMA2_0 BFIN_IRQ(23) /* DMA2 0 (SPORT0 RX) */
43 DMA1 9 Interrupt IVG8 27 43#define IRQ_SPORT0_RX IRQ_DMA2_0 /* DMA2 0 (SPORT0 RX) */
44 DMA1 10 Interrupt IVG8 28 44#define IRQ_DMA2_1 BFIN_IRQ(24) /* DMA2 1 (SPORT0 TX) */
45 DMA1 11 Interrupt IVG8 29 45#define IRQ_SPORT0_TX IRQ_DMA2_1 /* DMA2 1 (SPORT0 TX) */
46 46#define IRQ_DMA2_2 BFIN_IRQ(25) /* DMA2 2 (SPORT1 RX) */
47 DMA2 0 (SPORT0 RX) IVG9 30 47#define IRQ_SPORT1_RX IRQ_DMA2_2 /* DMA2 2 (SPORT1 RX) */
48 DMA2 1 (SPORT0 TX) IVG9 31 48#define IRQ_DMA2_3 BFIN_IRQ(26) /* DMA2 3 (SPORT2 TX) */
49 DMA2 2 (SPORT1 RX) IVG9 32 49#define IRQ_SPORT1_TX IRQ_DMA2_3 /* DMA2 3 (SPORT2 TX) */
50 DMA2 3 (SPORT2 TX) IVG9 33 50#define IRQ_DMA2_4 BFIN_IRQ(27) /* DMA2 4 (SPI) */
51 DMA2 4 (SPI) IVG9 34 51#define IRQ_SPI IRQ_DMA2_4 /* DMA2 4 (SPI) */
52 DMA2 5 (UART RX) IVG9 35 52#define IRQ_DMA2_5 BFIN_IRQ(28) /* DMA2 5 (UART RX) */
53 DMA2 6 (UART TX) IVG9 36 53#define IRQ_UART_RX IRQ_DMA2_5 /* DMA2 5 (UART RX) */
54 DMA2 7 Interrupt IVG9 37 54#define IRQ_DMA2_6 BFIN_IRQ(29) /* DMA2 6 (UART TX) */
55 DMA2 8 Interrupt IVG9 38 55#define IRQ_UART_TX IRQ_DMA2_6 /* DMA2 6 (UART TX) */
56 DMA2 9 Interrupt IVG9 39 56#define IRQ_DMA2_7 BFIN_IRQ(30) /* DMA2 7 Interrupt */
57 DMA2 10 Interrupt IVG9 40 57#define IRQ_DMA2_8 BFIN_IRQ(31) /* DMA2 8 Interrupt */
58 DMA2 11 Interrupt IVG9 41 58#define IRQ_DMA2_9 BFIN_IRQ(32) /* DMA2 9 Interrupt */
59 59#define IRQ_DMA2_10 BFIN_IRQ(33) /* DMA2 10 Interrupt */
60 TIMER 0 Interrupt IVG10 42 60#define IRQ_DMA2_11 BFIN_IRQ(34) /* DMA2 11 Interrupt */
61 TIMER 1 Interrupt IVG10 43 61#define IRQ_TIMER0 BFIN_IRQ(35) /* TIMER 0 Interrupt */
62 TIMER 2 Interrupt IVG10 44 62#define IRQ_TIMER1 BFIN_IRQ(36) /* TIMER 1 Interrupt */
63 TIMER 3 Interrupt IVG10 45 63#define IRQ_TIMER2 BFIN_IRQ(37) /* TIMER 2 Interrupt */
64 TIMER 4 Interrupt IVG10 46 64#define IRQ_TIMER3 BFIN_IRQ(38) /* TIMER 3 Interrupt */
65 TIMER 5 Interrupt IVG10 47 65#define IRQ_TIMER4 BFIN_IRQ(39) /* TIMER 4 Interrupt */
66 TIMER 6 Interrupt IVG10 48 66#define IRQ_TIMER5 BFIN_IRQ(40) /* TIMER 5 Interrupt */
67 TIMER 7 Interrupt IVG10 49 67#define IRQ_TIMER6 BFIN_IRQ(41) /* TIMER 6 Interrupt */
68 TIMER 8 Interrupt IVG10 50 68#define IRQ_TIMER7 BFIN_IRQ(42) /* TIMER 7 Interrupt */
69 TIMER 9 Interrupt IVG10 51 69#define IRQ_TIMER8 BFIN_IRQ(43) /* TIMER 8 Interrupt */
70 TIMER 10 Interrupt IVG10 52 70#define IRQ_TIMER9 BFIN_IRQ(44) /* TIMER 9 Interrupt */
71 TIMER 11 Interrupt IVG10 53 71#define IRQ_TIMER10 BFIN_IRQ(45) /* TIMER 10 Interrupt */
72 72#define IRQ_TIMER11 BFIN_IRQ(46) /* TIMER 11 Interrupt */
73 Programmable Flags0 A (8) IVG11 54 73#define IRQ_PROG0_INTA BFIN_IRQ(47) /* Programmable Flags0 A (8) */
74 Programmable Flags0 B (8) IVG11 55 74#define IRQ_PROG_INTA IRQ_PROG0_INTA /* Programmable Flags0 A (8) */
75 Programmable Flags1 A (8) IVG11 56 75#define IRQ_PROG0_INTB BFIN_IRQ(48) /* Programmable Flags0 B (8) */
76 Programmable Flags1 B (8) IVG11 57 76#define IRQ_PROG_INTB IRQ_PROG0_INTB /* Programmable Flags0 B (8) */
77 Programmable Flags2 A (8) IVG11 58 77#define IRQ_PROG1_INTA BFIN_IRQ(49) /* Programmable Flags1 A (8) */
78 Programmable Flags2 B (8) IVG11 59 78#define IRQ_PROG1_INTB BFIN_IRQ(50) /* Programmable Flags1 B (8) */
79 79#define IRQ_PROG2_INTA BFIN_IRQ(51) /* Programmable Flags2 A (8) */
80 MDMA1 0 write/read INT IVG8 60 80#define IRQ_PROG2_INTB BFIN_IRQ(52) /* Programmable Flags2 B (8) */
81 MDMA1 1 write/read INT IVG8 61 81#define IRQ_DMA1_WRRD0 BFIN_IRQ(53) /* MDMA1 0 write/read INT */
82 82#define IRQ_DMA_WRRD0 IRQ_DMA1_WRRD0 /* MDMA1 0 write/read INT */
83 MDMA2 0 write/read INT IVG9 62
84 MDMA2 1 write/read INT IVG9 63
85
86 IMDMA 0 write/read INT IVG12 64
87 IMDMA 1 write/read INT IVG12 65
88
89 Watch Dog Timer IVG13 66
90
91 Reserved interrupt IVG7 67
92 Reserved interrupt IVG7 68
93 Supplemental interrupt 0 IVG7 69
94 supplemental interrupt 1 IVG7 70
95
96 Softirq IVG14
97 System Call --
98 (lowest priority) IVG15
99
100 **********************************************************************/
101
102#define SYS_IRQS 71
103#define NR_PERI_INTS 64
104
105/*
106 * The ABSTRACT IRQ definitions
107 * the first seven of the following are fixed,
108 * the rest you change if you need to.
109 */
110/* IVG 0-6*/
111#define IRQ_EMU 0 /* Emulation */
112#define IRQ_RST 1 /* Reset */
113#define IRQ_NMI 2 /* Non Maskable Interrupt */
114#define IRQ_EVX 3 /* Exception */
115#define IRQ_UNUSED 4 /* Reserved interrupt */
116#define IRQ_HWERR 5 /* Hardware Error */
117#define IRQ_CORETMR 6 /* Core timer */
118
119#define IVG_BASE 7
120/* IVG 7 */
121#define IRQ_PLL_WAKEUP (IVG_BASE + 0) /* PLL Wakeup Interrupt */
122#define IRQ_DMA1_ERROR (IVG_BASE + 1) /* DMA1 Error (general) */
123#define IRQ_DMA_ERROR IRQ_DMA1_ERROR /* DMA1 Error (general) */
124#define IRQ_DMA2_ERROR (IVG_BASE + 2) /* DMA2 Error (general) */
125#define IRQ_IMDMA_ERROR (IVG_BASE + 3) /* IMDMA Error Interrupt */
126#define IRQ_PPI1_ERROR (IVG_BASE + 4) /* PPI1 Error Interrupt */
127#define IRQ_PPI_ERROR IRQ_PPI1_ERROR /* PPI1 Error Interrupt */
128#define IRQ_PPI2_ERROR (IVG_BASE + 5) /* PPI2 Error Interrupt */
129#define IRQ_SPORT0_ERROR (IVG_BASE + 6) /* SPORT0 Error Interrupt */
130#define IRQ_SPORT1_ERROR (IVG_BASE + 7) /* SPORT1 Error Interrupt */
131#define IRQ_SPI_ERROR (IVG_BASE + 8) /* SPI Error Interrupt */
132#define IRQ_UART_ERROR (IVG_BASE + 9) /* UART Error Interrupt */
133#define IRQ_RESERVED_ERROR (IVG_BASE + 10) /* Reversed Interrupt */
134/* IVG 8 */
135#define IRQ_DMA1_0 (IVG_BASE + 11) /* DMA1 0 Interrupt(PPI1) */
136#define IRQ_PPI IRQ_DMA1_0 /* DMA1 0 Interrupt(PPI1) */
137#define IRQ_PPI0 IRQ_DMA1_0 /* DMA1 0 Interrupt(PPI1) */
138#define IRQ_DMA1_1 (IVG_BASE + 12) /* DMA1 1 Interrupt(PPI2) */
139#define IRQ_PPI1 IRQ_DMA1_1 /* DMA1 1 Interrupt(PPI2) */
140#define IRQ_DMA1_2 (IVG_BASE + 13) /* DMA1 2 Interrupt */
141#define IRQ_DMA1_3 (IVG_BASE + 14) /* DMA1 3 Interrupt */
142#define IRQ_DMA1_4 (IVG_BASE + 15) /* DMA1 4 Interrupt */
143#define IRQ_DMA1_5 (IVG_BASE + 16) /* DMA1 5 Interrupt */
144#define IRQ_DMA1_6 (IVG_BASE + 17) /* DMA1 6 Interrupt */
145#define IRQ_DMA1_7 (IVG_BASE + 18) /* DMA1 7 Interrupt */
146#define IRQ_DMA1_8 (IVG_BASE + 19) /* DMA1 8 Interrupt */
147#define IRQ_DMA1_9 (IVG_BASE + 20) /* DMA1 9 Interrupt */
148#define IRQ_DMA1_10 (IVG_BASE + 21) /* DMA1 10 Interrupt */
149#define IRQ_DMA1_11 (IVG_BASE + 22) /* DMA1 11 Interrupt */
150/* IVG 9 */
151#define IRQ_DMA2_0 (IVG_BASE + 23) /* DMA2 0 (SPORT0 RX) */
152#define IRQ_SPORT0_RX IRQ_DMA2_0 /* DMA2 0 (SPORT0 RX) */
153#define IRQ_DMA2_1 (IVG_BASE + 24) /* DMA2 1 (SPORT0 TX) */
154#define IRQ_SPORT0_TX IRQ_DMA2_1 /* DMA2 1 (SPORT0 TX) */
155#define IRQ_DMA2_2 (IVG_BASE + 25) /* DMA2 2 (SPORT1 RX) */
156#define IRQ_SPORT1_RX IRQ_DMA2_2 /* DMA2 2 (SPORT1 RX) */
157#define IRQ_DMA2_3 (IVG_BASE + 26) /* DMA2 3 (SPORT2 TX) */
158#define IRQ_SPORT1_TX IRQ_DMA2_3 /* DMA2 3 (SPORT2 TX) */
159#define IRQ_DMA2_4 (IVG_BASE + 27) /* DMA2 4 (SPI) */
160#define IRQ_SPI IRQ_DMA2_4 /* DMA2 4 (SPI) */
161#define IRQ_DMA2_5 (IVG_BASE + 28) /* DMA2 5 (UART RX) */
162#define IRQ_UART_RX IRQ_DMA2_5 /* DMA2 5 (UART RX) */
163#define IRQ_DMA2_6 (IVG_BASE + 29) /* DMA2 6 (UART TX) */
164#define IRQ_UART_TX IRQ_DMA2_6 /* DMA2 6 (UART TX) */
165#define IRQ_DMA2_7 (IVG_BASE + 30) /* DMA2 7 Interrupt */
166#define IRQ_DMA2_8 (IVG_BASE + 31) /* DMA2 8 Interrupt */
167#define IRQ_DMA2_9 (IVG_BASE + 32) /* DMA2 9 Interrupt */
168#define IRQ_DMA2_10 (IVG_BASE + 33) /* DMA2 10 Interrupt */
169#define IRQ_DMA2_11 (IVG_BASE + 34) /* DMA2 11 Interrupt */
170/* IVG 10 */
171#define IRQ_TIMER0 (IVG_BASE + 35) /* TIMER 0 Interrupt */
172#define IRQ_TIMER1 (IVG_BASE + 36) /* TIMER 1 Interrupt */
173#define IRQ_TIMER2 (IVG_BASE + 37) /* TIMER 2 Interrupt */
174#define IRQ_TIMER3 (IVG_BASE + 38) /* TIMER 3 Interrupt */
175#define IRQ_TIMER4 (IVG_BASE + 39) /* TIMER 4 Interrupt */
176#define IRQ_TIMER5 (IVG_BASE + 40) /* TIMER 5 Interrupt */
177#define IRQ_TIMER6 (IVG_BASE + 41) /* TIMER 6 Interrupt */
178#define IRQ_TIMER7 (IVG_BASE + 42) /* TIMER 7 Interrupt */
179#define IRQ_TIMER8 (IVG_BASE + 43) /* TIMER 8 Interrupt */
180#define IRQ_TIMER9 (IVG_BASE + 44) /* TIMER 9 Interrupt */
181#define IRQ_TIMER10 (IVG_BASE + 45) /* TIMER 10 Interrupt */
182#define IRQ_TIMER11 (IVG_BASE + 46) /* TIMER 11 Interrupt */
183/* IVG 11 */
184#define IRQ_PROG0_INTA (IVG_BASE + 47) /* Programmable Flags0 A (8) */
185#define IRQ_PROG_INTA IRQ_PROG0_INTA /* Programmable Flags0 A (8) */
186#define IRQ_PROG0_INTB (IVG_BASE + 48) /* Programmable Flags0 B (8) */
187#define IRQ_PROG_INTB IRQ_PROG0_INTB /* Programmable Flags0 B (8) */
188#define IRQ_PROG1_INTA (IVG_BASE + 49) /* Programmable Flags1 A (8) */
189#define IRQ_PROG1_INTB (IVG_BASE + 50) /* Programmable Flags1 B (8) */
190#define IRQ_PROG2_INTA (IVG_BASE + 51) /* Programmable Flags2 A (8) */
191#define IRQ_PROG2_INTB (IVG_BASE + 52) /* Programmable Flags2 B (8) */
192/* IVG 8 */
193#define IRQ_DMA1_WRRD0 (IVG_BASE + 53) /* MDMA1 0 write/read INT */
194#define IRQ_DMA_WRRD0 IRQ_DMA1_WRRD0 /* MDMA1 0 write/read INT */
195#define IRQ_MEM_DMA0 IRQ_DMA1_WRRD0 83#define IRQ_MEM_DMA0 IRQ_DMA1_WRRD0
196#define IRQ_DMA1_WRRD1 (IVG_BASE + 54) /* MDMA1 1 write/read INT */ 84#define IRQ_DMA1_WRRD1 BFIN_IRQ(54) /* MDMA1 1 write/read INT */
197#define IRQ_DMA_WRRD1 IRQ_DMA1_WRRD1 /* MDMA1 1 write/read INT */ 85#define IRQ_DMA_WRRD1 IRQ_DMA1_WRRD1 /* MDMA1 1 write/read INT */
198#define IRQ_MEM_DMA1 IRQ_DMA1_WRRD1 86#define IRQ_MEM_DMA1 IRQ_DMA1_WRRD1
199/* IVG 9 */ 87#define IRQ_DMA2_WRRD0 BFIN_IRQ(55) /* MDMA2 0 write/read INT */
200#define IRQ_DMA2_WRRD0 (IVG_BASE + 55) /* MDMA2 0 write/read INT */
201#define IRQ_MEM_DMA2 IRQ_DMA2_WRRD0 88#define IRQ_MEM_DMA2 IRQ_DMA2_WRRD0
202#define IRQ_DMA2_WRRD1 (IVG_BASE + 56) /* MDMA2 1 write/read INT */ 89#define IRQ_DMA2_WRRD1 BFIN_IRQ(56) /* MDMA2 1 write/read INT */
203#define IRQ_MEM_DMA3 IRQ_DMA2_WRRD1 90#define IRQ_MEM_DMA3 IRQ_DMA2_WRRD1
204/* IVG 12 */ 91#define IRQ_IMDMA_WRRD0 BFIN_IRQ(57) /* IMDMA 0 write/read INT */
205#define IRQ_IMDMA_WRRD0 (IVG_BASE + 57) /* IMDMA 0 write/read INT */
206#define IRQ_IMEM_DMA0 IRQ_IMDMA_WRRD0 92#define IRQ_IMEM_DMA0 IRQ_IMDMA_WRRD0
207#define IRQ_IMDMA_WRRD1 (IVG_BASE + 58) /* IMDMA 1 write/read INT */ 93#define IRQ_IMDMA_WRRD1 BFIN_IRQ(58) /* IMDMA 1 write/read INT */
208#define IRQ_IMEM_DMA1 IRQ_IMDMA_WRRD1 94#define IRQ_IMEM_DMA1 IRQ_IMDMA_WRRD1
209/* IVG 13 */ 95#define IRQ_WATCH BFIN_IRQ(59) /* Watch Dog Timer */
210#define IRQ_WATCH (IVG_BASE + 59) /* Watch Dog Timer */ 96#define IRQ_RESERVED_1 BFIN_IRQ(60) /* Reserved interrupt */
211/* IVG 7 */ 97#define IRQ_RESERVED_2 BFIN_IRQ(61) /* Reserved interrupt */
212#define IRQ_RESERVED_1 (IVG_BASE + 60) /* Reserved interrupt */ 98#define IRQ_SUPPLE_0 BFIN_IRQ(62) /* Supplemental interrupt 0 */
213#define IRQ_RESERVED_2 (IVG_BASE + 61) /* Reserved interrupt */ 99#define IRQ_SUPPLE_1 BFIN_IRQ(63) /* supplemental interrupt 1 */
214#define IRQ_SUPPLE_0 (IVG_BASE + 62) /* Supplemental interrupt 0 */ 100
215#define IRQ_SUPPLE_1 (IVG_BASE + 63) /* supplemental interrupt 1 */ 101#define SYS_IRQS 71
216 102
217#define IRQ_PF0 73 103#define IRQ_PF0 73
218#define IRQ_PF1 74 104#define IRQ_PF1 74
@@ -266,158 +152,85 @@
266#define GPIO_IRQ_BASE IRQ_PF0 152#define GPIO_IRQ_BASE IRQ_PF0
267 153
268#define NR_MACH_IRQS (IRQ_PF47 + 1) 154#define NR_MACH_IRQS (IRQ_PF47 + 1)
269#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS)
270
271#define IVG7 7
272#define IVG8 8
273#define IVG9 9
274#define IVG10 10
275#define IVG11 11
276#define IVG12 12
277#define IVG13 13
278#define IVG14 14
279#define IVG15 15
280
281/*
282 * DEFAULT PRIORITIES:
283 */
284
285#define CONFIG_DEF_PLL_WAKEUP 7
286#define CONFIG_DEF_DMA1_ERROR 7
287#define CONFIG_DEF_DMA2_ERROR 7
288#define CONFIG_DEF_IMDMA_ERROR 7
289#define CONFIG_DEF_PPI1_ERROR 7
290#define CONFIG_DEF_PPI2_ERROR 7
291#define CONFIG_DEF_SPORT0_ERROR 7
292#define CONFIG_DEF_SPORT1_ERROR 7
293#define CONFIG_DEF_SPI_ERROR 7
294#define CONFIG_DEF_UART_ERROR 7
295#define CONFIG_DEF_RESERVED_ERROR 7
296#define CONFIG_DEF_DMA1_0 8
297#define CONFIG_DEF_DMA1_1 8
298#define CONFIG_DEF_DMA1_2 8
299#define CONFIG_DEF_DMA1_3 8
300#define CONFIG_DEF_DMA1_4 8
301#define CONFIG_DEF_DMA1_5 8
302#define CONFIG_DEF_DMA1_6 8
303#define CONFIG_DEF_DMA1_7 8
304#define CONFIG_DEF_DMA1_8 8
305#define CONFIG_DEF_DMA1_9 8
306#define CONFIG_DEF_DMA1_10 8
307#define CONFIG_DEF_DMA1_11 8
308#define CONFIG_DEF_DMA2_0 9
309#define CONFIG_DEF_DMA2_1 9
310#define CONFIG_DEF_DMA2_2 9
311#define CONFIG_DEF_DMA2_3 9
312#define CONFIG_DEF_DMA2_4 9
313#define CONFIG_DEF_DMA2_5 9
314#define CONFIG_DEF_DMA2_6 9
315#define CONFIG_DEF_DMA2_7 9
316#define CONFIG_DEF_DMA2_8 9
317#define CONFIG_DEF_DMA2_9 9
318#define CONFIG_DEF_DMA2_10 9
319#define CONFIG_DEF_DMA2_11 9
320#define CONFIG_DEF_TIMER0 10
321#define CONFIG_DEF_TIMER1 10
322#define CONFIG_DEF_TIMER2 10
323#define CONFIG_DEF_TIMER3 10
324#define CONFIG_DEF_TIMER4 10
325#define CONFIG_DEF_TIMER5 10
326#define CONFIG_DEF_TIMER6 10
327#define CONFIG_DEF_TIMER7 10
328#define CONFIG_DEF_TIMER8 10
329#define CONFIG_DEF_TIMER9 10
330#define CONFIG_DEF_TIMER10 10
331#define CONFIG_DEF_TIMER11 10
332#define CONFIG_DEF_PROG0_INTA 11
333#define CONFIG_DEF_PROG0_INTB 11
334#define CONFIG_DEF_PROG1_INTA 11
335#define CONFIG_DEF_PROG1_INTB 11
336#define CONFIG_DEF_PROG2_INTA 11
337#define CONFIG_DEF_PROG2_INTB 11
338#define CONFIG_DEF_DMA1_WRRD0 8
339#define CONFIG_DEF_DMA1_WRRD1 8
340#define CONFIG_DEF_DMA2_WRRD0 9
341#define CONFIG_DEF_DMA2_WRRD1 9
342#define CONFIG_DEF_IMDMA_WRRD0 12
343#define CONFIG_DEF_IMDMA_WRRD1 12
344#define CONFIG_DEF_WATCH 13
345#define CONFIG_DEF_RESERVED_1 7
346#define CONFIG_DEF_RESERVED_2 7
347#define CONFIG_DEF_SUPPLE_0 7
348#define CONFIG_DEF_SUPPLE_1 7
349 155
350/* IAR0 BIT FIELDS */ 156/* IAR0 BIT FIELDS */
351#define IRQ_PLL_WAKEUP_POS 0 157#define IRQ_PLL_WAKEUP_POS 0
352#define IRQ_DMA1_ERROR_POS 4 158#define IRQ_DMA1_ERROR_POS 4
353#define IRQ_DMA2_ERROR_POS 8 159#define IRQ_DMA2_ERROR_POS 8
354#define IRQ_IMDMA_ERROR_POS 12 160#define IRQ_IMDMA_ERROR_POS 12
355#define IRQ_PPI0_ERROR_POS 16 161#define IRQ_PPI0_ERROR_POS 16
356#define IRQ_PPI1_ERROR_POS 20 162#define IRQ_PPI1_ERROR_POS 20
357#define IRQ_SPORT0_ERROR_POS 24 163#define IRQ_SPORT0_ERROR_POS 24
358#define IRQ_SPORT1_ERROR_POS 28 164#define IRQ_SPORT1_ERROR_POS 28
165
359/* IAR1 BIT FIELDS */ 166/* IAR1 BIT FIELDS */
360#define IRQ_SPI_ERROR_POS 0 167#define IRQ_SPI_ERROR_POS 0
361#define IRQ_UART_ERROR_POS 4 168#define IRQ_UART_ERROR_POS 4
362#define IRQ_RESERVED_ERROR_POS 8 169#define IRQ_RESERVED_ERROR_POS 8
363#define IRQ_DMA1_0_POS 12 170#define IRQ_DMA1_0_POS 12
364#define IRQ_DMA1_1_POS 16 171#define IRQ_DMA1_1_POS 16
365#define IRQ_DMA1_2_POS 20 172#define IRQ_DMA1_2_POS 20
366#define IRQ_DMA1_3_POS 24 173#define IRQ_DMA1_3_POS 24
367#define IRQ_DMA1_4_POS 28 174#define IRQ_DMA1_4_POS 28
175
368/* IAR2 BIT FIELDS */ 176/* IAR2 BIT FIELDS */
369#define IRQ_DMA1_5_POS 0 177#define IRQ_DMA1_5_POS 0
370#define IRQ_DMA1_6_POS 4 178#define IRQ_DMA1_6_POS 4
371#define IRQ_DMA1_7_POS 8 179#define IRQ_DMA1_7_POS 8
372#define IRQ_DMA1_8_POS 12 180#define IRQ_DMA1_8_POS 12
373#define IRQ_DMA1_9_POS 16 181#define IRQ_DMA1_9_POS 16
374#define IRQ_DMA1_10_POS 20 182#define IRQ_DMA1_10_POS 20
375#define IRQ_DMA1_11_POS 24 183#define IRQ_DMA1_11_POS 24
376#define IRQ_DMA2_0_POS 28 184#define IRQ_DMA2_0_POS 28
185
377/* IAR3 BIT FIELDS */ 186/* IAR3 BIT FIELDS */
378#define IRQ_DMA2_1_POS 0 187#define IRQ_DMA2_1_POS 0
379#define IRQ_DMA2_2_POS 4 188#define IRQ_DMA2_2_POS 4
380#define IRQ_DMA2_3_POS 8 189#define IRQ_DMA2_3_POS 8
381#define IRQ_DMA2_4_POS 12 190#define IRQ_DMA2_4_POS 12
382#define IRQ_DMA2_5_POS 16 191#define IRQ_DMA2_5_POS 16
383#define IRQ_DMA2_6_POS 20 192#define IRQ_DMA2_6_POS 20
384#define IRQ_DMA2_7_POS 24 193#define IRQ_DMA2_7_POS 24
385#define IRQ_DMA2_8_POS 28 194#define IRQ_DMA2_8_POS 28
195
386/* IAR4 BIT FIELDS */ 196/* IAR4 BIT FIELDS */
387#define IRQ_DMA2_9_POS 0 197#define IRQ_DMA2_9_POS 0
388#define IRQ_DMA2_10_POS 4 198#define IRQ_DMA2_10_POS 4
389#define IRQ_DMA2_11_POS 8 199#define IRQ_DMA2_11_POS 8
390#define IRQ_TIMER0_POS 12 200#define IRQ_TIMER0_POS 12
391#define IRQ_TIMER1_POS 16 201#define IRQ_TIMER1_POS 16
392#define IRQ_TIMER2_POS 20 202#define IRQ_TIMER2_POS 20
393#define IRQ_TIMER3_POS 24 203#define IRQ_TIMER3_POS 24
394#define IRQ_TIMER4_POS 28 204#define IRQ_TIMER4_POS 28
205
395/* IAR5 BIT FIELDS */ 206/* IAR5 BIT FIELDS */
396#define IRQ_TIMER5_POS 0 207#define IRQ_TIMER5_POS 0
397#define IRQ_TIMER6_POS 4 208#define IRQ_TIMER6_POS 4
398#define IRQ_TIMER7_POS 8 209#define IRQ_TIMER7_POS 8
399#define IRQ_TIMER8_POS 12 210#define IRQ_TIMER8_POS 12
400#define IRQ_TIMER9_POS 16 211#define IRQ_TIMER9_POS 16
401#define IRQ_TIMER10_POS 20 212#define IRQ_TIMER10_POS 20
402#define IRQ_TIMER11_POS 24 213#define IRQ_TIMER11_POS 24
403#define IRQ_PROG0_INTA_POS 28 214#define IRQ_PROG0_INTA_POS 28
215
404/* IAR6 BIT FIELDS */ 216/* IAR6 BIT FIELDS */
405#define IRQ_PROG0_INTB_POS 0 217#define IRQ_PROG0_INTB_POS 0
406#define IRQ_PROG1_INTA_POS 4 218#define IRQ_PROG1_INTA_POS 4
407#define IRQ_PROG1_INTB_POS 8 219#define IRQ_PROG1_INTB_POS 8
408#define IRQ_PROG2_INTA_POS 12 220#define IRQ_PROG2_INTA_POS 12
409#define IRQ_PROG2_INTB_POS 16 221#define IRQ_PROG2_INTB_POS 16
410#define IRQ_DMA1_WRRD0_POS 20 222#define IRQ_DMA1_WRRD0_POS 20
411#define IRQ_DMA1_WRRD1_POS 24 223#define IRQ_DMA1_WRRD1_POS 24
412#define IRQ_DMA2_WRRD0_POS 28 224#define IRQ_DMA2_WRRD0_POS 28
413/* IAR7 BIT FIELDS */
414#define IRQ_DMA2_WRRD1_POS 0
415#define IRQ_IMDMA_WRRD0_POS 4
416#define IRQ_IMDMA_WRRD1_POS 8
417#define IRQ_WDTIMER_POS 12
418#define IRQ_RESERVED_1_POS 16
419#define IRQ_RESERVED_2_POS 20
420#define IRQ_SUPPLE_0_POS 24
421#define IRQ_SUPPLE_1_POS 28
422 225
423#endif /* _BF561_IRQ_H_ */ 226/* IAR7 BIT FIELDS */
227#define IRQ_DMA2_WRRD1_POS 0
228#define IRQ_IMDMA_WRRD0_POS 4
229#define IRQ_IMDMA_WRRD1_POS 8
230#define IRQ_WDTIMER_POS 12
231#define IRQ_RESERVED_1_POS 16
232#define IRQ_RESERVED_2_POS 20
233#define IRQ_SUPPLE_0_POS 24
234#define IRQ_SUPPLE_1_POS 28
235
236#endif
diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c
index 7b07740cf68c..85abd8be1343 100644
--- a/arch/blackfin/mach-bf561/smp.c
+++ b/arch/blackfin/mach-bf561/smp.c
@@ -24,17 +24,23 @@ static DEFINE_SPINLOCK(boot_lock);
24 24
25void __init platform_init_cpus(void) 25void __init platform_init_cpus(void)
26{ 26{
27 cpu_set(0, cpu_possible_map); /* CoreA */ 27 struct cpumask mask;
28 cpu_set(1, cpu_possible_map); /* CoreB */ 28
29 cpumask_set_cpu(0, &mask); /* CoreA */
30 cpumask_set_cpu(1, &mask); /* CoreB */
31 init_cpu_possible(&mask);
29} 32}
30 33
31void __init platform_prepare_cpus(unsigned int max_cpus) 34void __init platform_prepare_cpus(unsigned int max_cpus)
32{ 35{
36 struct cpumask mask;
37
33 bfin_relocate_coreb_l1_mem(); 38 bfin_relocate_coreb_l1_mem();
34 39
35 /* Both cores ought to be present on a bf561! */ 40 /* Both cores ought to be present on a bf561! */
36 cpu_set(0, cpu_present_map); /* CoreA */ 41 cpumask_set_cpu(0, &mask); /* CoreA */
37 cpu_set(1, cpu_present_map); /* CoreB */ 42 cpumask_set_cpu(1, &mask); /* CoreB */
43 init_cpu_present(&mask);
38} 44}
39 45
40int __init setup_profiling_timer(unsigned int multiplier) /* not supported */ 46int __init setup_profiling_timer(unsigned int multiplier) /* not supported */
@@ -62,9 +68,6 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
62 bfin_write_SICB_IWR1(IWR_DISABLE_ALL); 68 bfin_write_SICB_IWR1(IWR_DISABLE_ALL);
63 SSYNC(); 69 SSYNC();
64 70
65 /* Store CPU-private information to the cpu_data array. */
66 bfin_setup_cpudata(cpu);
67
68 /* We are done with local CPU inits, unblock the boot CPU. */ 71 /* We are done with local CPU inits, unblock the boot CPU. */
69 set_cpu_online(cpu, true); 72 set_cpu_online(cpu, true);
70 spin_lock(&boot_lock); 73 spin_lock(&boot_lock);
diff --git a/arch/blackfin/mach-common/dpmc.c b/arch/blackfin/mach-common/dpmc.c
index 5e4112e518a9..f5685a496c58 100644
--- a/arch/blackfin/mach-common/dpmc.c
+++ b/arch/blackfin/mach-common/dpmc.c
@@ -85,10 +85,11 @@ static void bfin_wakeup_cpu(void)
85{ 85{
86 unsigned int cpu; 86 unsigned int cpu;
87 unsigned int this_cpu = smp_processor_id(); 87 unsigned int this_cpu = smp_processor_id();
88 cpumask_t mask = cpu_online_map; 88 cpumask_t mask;
89 89
90 cpu_clear(this_cpu, mask); 90 cpumask_copy(&mask, cpu_online_mask);
91 for_each_cpu_mask(cpu, mask) 91 cpumask_clear_cpu(this_cpu, &mask);
92 for_each_cpu(cpu, &mask)
92 platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0); 93 platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
93} 94}
94 95
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 43d9fb195c1e..1177369f9922 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -19,32 +19,14 @@
19#ifdef CONFIG_IPIPE 19#ifdef CONFIG_IPIPE
20#include <linux/ipipe.h> 20#include <linux/ipipe.h>
21#endif 21#endif
22#ifdef CONFIG_KGDB
23#include <linux/kgdb.h>
24#endif
25#include <asm/traps.h> 22#include <asm/traps.h>
26#include <asm/blackfin.h> 23#include <asm/blackfin.h>
27#include <asm/gpio.h> 24#include <asm/gpio.h>
28#include <asm/irq_handler.h> 25#include <asm/irq_handler.h>
29#include <asm/dpmc.h> 26#include <asm/dpmc.h>
30#include <asm/bfin5xx_spi.h>
31#include <asm/bfin_sport.h>
32#include <asm/bfin_can.h>
33 27
34#define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1)) 28#define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1))
35 29
36#ifdef BF537_FAMILY
37# define BF537_GENERIC_ERROR_INT_DEMUX
38# define SPI_ERR_MASK (BIT_STAT_TXCOL | BIT_STAT_RBSY | BIT_STAT_MODF | BIT_STAT_TXE) /* SPI_STAT */
39# define SPORT_ERR_MASK (ROVF | RUVF | TOVF | TUVF) /* SPORT_STAT */
40# define PPI_ERR_MASK (0xFFFF & ~FLD) /* PPI_STATUS */
41# define EMAC_ERR_MASK (PHYINT | MMCINT | RXFSINT | TXFSINT | WAKEDET | RXDMAERR | TXDMAERR | STMDONE) /* EMAC_SYSTAT */
42# define UART_ERR_MASK (0x6) /* UART_IIR */
43# define CAN_ERR_MASK (EWTIF | EWRIF | EPIF | BOIF | WUIF | UIAIF | AAIF | RMLIF | UCEIF | EXTIF | ADIF) /* CAN_GIF */
44#else
45# undef BF537_GENERIC_ERROR_INT_DEMUX
46#endif
47
48/* 30/*
49 * NOTES: 31 * NOTES:
50 * - we have separated the physical Hardware interrupt from the 32 * - we have separated the physical Hardware interrupt from the
@@ -63,22 +45,19 @@ unsigned long bfin_irq_flags = 0x1f;
63EXPORT_SYMBOL(bfin_irq_flags); 45EXPORT_SYMBOL(bfin_irq_flags);
64#endif 46#endif
65 47
66/* The number of spurious interrupts */
67atomic_t num_spurious;
68
69#ifdef CONFIG_PM 48#ifdef CONFIG_PM
70unsigned long bfin_sic_iwr[3]; /* Up to 3 SIC_IWRx registers */ 49unsigned long bfin_sic_iwr[3]; /* Up to 3 SIC_IWRx registers */
71unsigned vr_wakeup; 50unsigned vr_wakeup;
72#endif 51#endif
73 52
74struct ivgx { 53static struct ivgx {
75 /* irq number for request_irq, available in mach-bf5xx/irq.h */ 54 /* irq number for request_irq, available in mach-bf5xx/irq.h */
76 unsigned int irqno; 55 unsigned int irqno;
77 /* corresponding bit in the SIC_ISR register */ 56 /* corresponding bit in the SIC_ISR register */
78 unsigned int isrflag; 57 unsigned int isrflag;
79} ivg_table[NR_PERI_INTS]; 58} ivg_table[NR_PERI_INTS];
80 59
81struct ivg_slice { 60static struct ivg_slice {
82 /* position of first irq in ivg_table for given ivg */ 61 /* position of first irq in ivg_table for given ivg */
83 struct ivgx *ifirst; 62 struct ivgx *ifirst;
84 struct ivgx *istop; 63 struct ivgx *istop;
@@ -125,7 +104,7 @@ static void __init search_IAR(void)
125 * This is for core internal IRQs 104 * This is for core internal IRQs
126 */ 105 */
127 106
128static void bfin_ack_noop(struct irq_data *d) 107void bfin_ack_noop(struct irq_data *d)
129{ 108{
130 /* Dummy function. */ 109 /* Dummy function. */
131} 110}
@@ -154,26 +133,24 @@ static void bfin_core_unmask_irq(struct irq_data *d)
154 return; 133 return;
155} 134}
156 135
157static void bfin_internal_mask_irq(unsigned int irq) 136void bfin_internal_mask_irq(unsigned int irq)
158{ 137{
159 unsigned long flags; 138 unsigned long flags = hard_local_irq_save();
160 139
161#ifdef CONFIG_BF53x 140#ifdef SIC_IMASK0
162 flags = hard_local_irq_save(); 141 unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
163 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() & 142 unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
164 ~(1 << SIC_SYSIRQ(irq)));
165#else
166 unsigned mask_bank, mask_bit;
167 flags = hard_local_irq_save();
168 mask_bank = SIC_SYSIRQ(irq) / 32;
169 mask_bit = SIC_SYSIRQ(irq) % 32;
170 bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) & 143 bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
171 ~(1 << mask_bit)); 144 ~(1 << mask_bit));
172#ifdef CONFIG_SMP 145# ifdef CONFIG_SMP
173 bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) & 146 bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
174 ~(1 << mask_bit)); 147 ~(1 << mask_bit));
148# endif
149#else
150 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
151 ~(1 << SIC_SYSIRQ(irq)));
175#endif 152#endif
176#endif 153
177 hard_local_irq_restore(flags); 154 hard_local_irq_restore(flags);
178} 155}
179 156
@@ -186,33 +163,31 @@ static void bfin_internal_mask_irq_chip(struct irq_data *d)
186static void bfin_internal_unmask_irq_affinity(unsigned int irq, 163static void bfin_internal_unmask_irq_affinity(unsigned int irq,
187 const struct cpumask *affinity) 164 const struct cpumask *affinity)
188#else 165#else
189static void bfin_internal_unmask_irq(unsigned int irq) 166void bfin_internal_unmask_irq(unsigned int irq)
190#endif 167#endif
191{ 168{
192 unsigned long flags; 169 unsigned long flags = hard_local_irq_save();
193 170
194#ifdef CONFIG_BF53x 171#ifdef SIC_IMASK0
195 flags = hard_local_irq_save(); 172 unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
196 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() | 173 unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
197 (1 << SIC_SYSIRQ(irq))); 174# ifdef CONFIG_SMP
198#else
199 unsigned mask_bank, mask_bit;
200 flags = hard_local_irq_save();
201 mask_bank = SIC_SYSIRQ(irq) / 32;
202 mask_bit = SIC_SYSIRQ(irq) % 32;
203#ifdef CONFIG_SMP
204 if (cpumask_test_cpu(0, affinity)) 175 if (cpumask_test_cpu(0, affinity))
205#endif 176# endif
206 bfin_write_SIC_IMASK(mask_bank, 177 bfin_write_SIC_IMASK(mask_bank,
207 bfin_read_SIC_IMASK(mask_bank) | 178 bfin_read_SIC_IMASK(mask_bank) |
208 (1 << mask_bit)); 179 (1 << mask_bit));
209#ifdef CONFIG_SMP 180# ifdef CONFIG_SMP
210 if (cpumask_test_cpu(1, affinity)) 181 if (cpumask_test_cpu(1, affinity))
211 bfin_write_SICB_IMASK(mask_bank, 182 bfin_write_SICB_IMASK(mask_bank,
212 bfin_read_SICB_IMASK(mask_bank) | 183 bfin_read_SICB_IMASK(mask_bank) |
213 (1 << mask_bit)); 184 (1 << mask_bit));
185# endif
186#else
187 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
188 (1 << SIC_SYSIRQ(irq)));
214#endif 189#endif
215#endif 190
216 hard_local_irq_restore(flags); 191 hard_local_irq_restore(flags);
217} 192}
218 193
@@ -295,6 +270,8 @@ static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
295{ 270{
296 return bfin_internal_set_wake(d->irq, state); 271 return bfin_internal_set_wake(d->irq, state);
297} 272}
273#else
274# define bfin_internal_set_wake_chip NULL
298#endif 275#endif
299 276
300static struct irq_chip bfin_core_irqchip = { 277static struct irq_chip bfin_core_irqchip = {
@@ -315,12 +292,10 @@ static struct irq_chip bfin_internal_irqchip = {
315#ifdef CONFIG_SMP 292#ifdef CONFIG_SMP
316 .irq_set_affinity = bfin_internal_set_affinity, 293 .irq_set_affinity = bfin_internal_set_affinity,
317#endif 294#endif
318#ifdef CONFIG_PM
319 .irq_set_wake = bfin_internal_set_wake_chip, 295 .irq_set_wake = bfin_internal_set_wake_chip,
320#endif
321}; 296};
322 297
323static void bfin_handle_irq(unsigned irq) 298void bfin_handle_irq(unsigned irq)
324{ 299{
325#ifdef CONFIG_IPIPE 300#ifdef CONFIG_IPIPE
326 struct pt_regs regs; /* Contents not used. */ 301 struct pt_regs regs; /* Contents not used. */
@@ -332,102 +307,6 @@ static void bfin_handle_irq(unsigned irq)
332#endif /* !CONFIG_IPIPE */ 307#endif /* !CONFIG_IPIPE */
333} 308}
334 309
335#ifdef BF537_GENERIC_ERROR_INT_DEMUX
336static int error_int_mask;
337
338static void bfin_generic_error_mask_irq(struct irq_data *d)
339{
340 error_int_mask &= ~(1L << (d->irq - IRQ_PPI_ERROR));
341 if (!error_int_mask)
342 bfin_internal_mask_irq(IRQ_GENERIC_ERROR);
343}
344
345static void bfin_generic_error_unmask_irq(struct irq_data *d)
346{
347 bfin_internal_unmask_irq(IRQ_GENERIC_ERROR);
348 error_int_mask |= 1L << (d->irq - IRQ_PPI_ERROR);
349}
350
351static struct irq_chip bfin_generic_error_irqchip = {
352 .name = "ERROR",
353 .irq_ack = bfin_ack_noop,
354 .irq_mask_ack = bfin_generic_error_mask_irq,
355 .irq_mask = bfin_generic_error_mask_irq,
356 .irq_unmask = bfin_generic_error_unmask_irq,
357};
358
359static void bfin_demux_error_irq(unsigned int int_err_irq,
360 struct irq_desc *inta_desc)
361{
362 int irq = 0;
363
364#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
365 if (bfin_read_EMAC_SYSTAT() & EMAC_ERR_MASK)
366 irq = IRQ_MAC_ERROR;
367 else
368#endif
369 if (bfin_read_SPORT0_STAT() & SPORT_ERR_MASK)
370 irq = IRQ_SPORT0_ERROR;
371 else if (bfin_read_SPORT1_STAT() & SPORT_ERR_MASK)
372 irq = IRQ_SPORT1_ERROR;
373 else if (bfin_read_PPI_STATUS() & PPI_ERR_MASK)
374 irq = IRQ_PPI_ERROR;
375 else if (bfin_read_CAN_GIF() & CAN_ERR_MASK)
376 irq = IRQ_CAN_ERROR;
377 else if (bfin_read_SPI_STAT() & SPI_ERR_MASK)
378 irq = IRQ_SPI_ERROR;
379 else if ((bfin_read_UART0_IIR() & UART_ERR_MASK) == UART_ERR_MASK)
380 irq = IRQ_UART0_ERROR;
381 else if ((bfin_read_UART1_IIR() & UART_ERR_MASK) == UART_ERR_MASK)
382 irq = IRQ_UART1_ERROR;
383
384 if (irq) {
385 if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR)))
386 bfin_handle_irq(irq);
387 else {
388
389 switch (irq) {
390 case IRQ_PPI_ERROR:
391 bfin_write_PPI_STATUS(PPI_ERR_MASK);
392 break;
393#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
394 case IRQ_MAC_ERROR:
395 bfin_write_EMAC_SYSTAT(EMAC_ERR_MASK);
396 break;
397#endif
398 case IRQ_SPORT0_ERROR:
399 bfin_write_SPORT0_STAT(SPORT_ERR_MASK);
400 break;
401
402 case IRQ_SPORT1_ERROR:
403 bfin_write_SPORT1_STAT(SPORT_ERR_MASK);
404 break;
405
406 case IRQ_CAN_ERROR:
407 bfin_write_CAN_GIS(CAN_ERR_MASK);
408 break;
409
410 case IRQ_SPI_ERROR:
411 bfin_write_SPI_STAT(SPI_ERR_MASK);
412 break;
413
414 default:
415 break;
416 }
417
418 pr_debug("IRQ %d:"
419 " MASKED PERIPHERAL ERROR INTERRUPT ASSERTED\n",
420 irq);
421 }
422 } else
423 printk(KERN_ERR
424 "%s : %s : LINE %d :\nIRQ ?: PERIPHERAL ERROR"
425 " INTERRUPT ASSERTED BUT NO SOURCE FOUND\n",
426 __func__, __FILE__, __LINE__);
427
428}
429#endif /* BF537_GENERIC_ERROR_INT_DEMUX */
430
431#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 310#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
432static int mac_stat_int_mask; 311static int mac_stat_int_mask;
433 312
@@ -468,7 +347,7 @@ static void bfin_mac_status_mask_irq(struct irq_data *d)
468 unsigned int irq = d->irq; 347 unsigned int irq = d->irq;
469 348
470 mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT)); 349 mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
471#ifdef BF537_GENERIC_ERROR_INT_DEMUX 350#ifdef BF537_FAMILY
472 switch (irq) { 351 switch (irq) {
473 case IRQ_MAC_PHYINT: 352 case IRQ_MAC_PHYINT:
474 bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE); 353 bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE);
@@ -487,7 +366,7 @@ static void bfin_mac_status_unmask_irq(struct irq_data *d)
487{ 366{
488 unsigned int irq = d->irq; 367 unsigned int irq = d->irq;
489 368
490#ifdef BF537_GENERIC_ERROR_INT_DEMUX 369#ifdef BF537_FAMILY
491 switch (irq) { 370 switch (irq) {
492 case IRQ_MAC_PHYINT: 371 case IRQ_MAC_PHYINT:
493 bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE); 372 bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE);
@@ -505,12 +384,14 @@ static void bfin_mac_status_unmask_irq(struct irq_data *d)
505#ifdef CONFIG_PM 384#ifdef CONFIG_PM
506int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state) 385int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state)
507{ 386{
508#ifdef BF537_GENERIC_ERROR_INT_DEMUX 387#ifdef BF537_FAMILY
509 return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state); 388 return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
510#else 389#else
511 return bfin_internal_set_wake(IRQ_MAC_ERROR, state); 390 return bfin_internal_set_wake(IRQ_MAC_ERROR, state);
512#endif 391#endif
513} 392}
393#else
394# define bfin_mac_status_set_wake NULL
514#endif 395#endif
515 396
516static struct irq_chip bfin_mac_status_irqchip = { 397static struct irq_chip bfin_mac_status_irqchip = {
@@ -519,13 +400,11 @@ static struct irq_chip bfin_mac_status_irqchip = {
519 .irq_mask_ack = bfin_mac_status_mask_irq, 400 .irq_mask_ack = bfin_mac_status_mask_irq,
520 .irq_mask = bfin_mac_status_mask_irq, 401 .irq_mask = bfin_mac_status_mask_irq,
521 .irq_unmask = bfin_mac_status_unmask_irq, 402 .irq_unmask = bfin_mac_status_unmask_irq,
522#ifdef CONFIG_PM
523 .irq_set_wake = bfin_mac_status_set_wake, 403 .irq_set_wake = bfin_mac_status_set_wake,
524#endif
525}; 404};
526 405
527static void bfin_demux_mac_status_irq(unsigned int int_err_irq, 406void bfin_demux_mac_status_irq(unsigned int int_err_irq,
528 struct irq_desc *inta_desc) 407 struct irq_desc *inta_desc)
529{ 408{
530 int i, irq = 0; 409 int i, irq = 0;
531 u32 status = bfin_read_EMAC_SYSTAT(); 410 u32 status = bfin_read_EMAC_SYSTAT();
@@ -680,29 +559,48 @@ static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
680} 559}
681 560
682#ifdef CONFIG_PM 561#ifdef CONFIG_PM
683int bfin_gpio_set_wake(struct irq_data *d, unsigned int state) 562static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
684{ 563{
685 return gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state); 564 return gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state);
686} 565}
566#else
567# define bfin_gpio_set_wake NULL
687#endif 568#endif
688 569
689static void bfin_demux_gpio_irq(unsigned int inta_irq, 570static void bfin_demux_gpio_block(unsigned int irq)
690 struct irq_desc *desc)
691{ 571{
692 unsigned int i, gpio, mask, irq, search = 0; 572 unsigned int gpio, mask;
573
574 gpio = irq_to_gpio(irq);
575 mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
576
577 while (mask) {
578 if (mask & 1)
579 bfin_handle_irq(irq);
580 irq++;
581 mask >>= 1;
582 }
583}
584
585void bfin_demux_gpio_irq(unsigned int inta_irq,
586 struct irq_desc *desc)
587{
588 unsigned int irq;
693 589
694 switch (inta_irq) { 590 switch (inta_irq) {
695#if defined(CONFIG_BF53x) 591#if defined(BF537_FAMILY)
696 case IRQ_PROG_INTA: 592 case IRQ_PF_INTA_PG_INTA:
697 irq = IRQ_PF0; 593 bfin_demux_gpio_block(IRQ_PF0);
698 search = 1; 594 irq = IRQ_PG0;
699 break; 595 break;
700# if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)) 596 case IRQ_PH_INTA_MAC_RX:
701 case IRQ_MAC_RX:
702 irq = IRQ_PH0; 597 irq = IRQ_PH0;
703 break; 598 break;
704# endif 599#elif defined(BF533_FAMILY)
705#elif defined(CONFIG_BF538) || defined(CONFIG_BF539) 600 case IRQ_PROG_INTA:
601 irq = IRQ_PF0;
602 break;
603#elif defined(BF538_FAMILY)
706 case IRQ_PORTF_INTA: 604 case IRQ_PORTF_INTA:
707 irq = IRQ_PF0; 605 irq = IRQ_PF0;
708 break; 606 break;
@@ -732,31 +630,7 @@ static void bfin_demux_gpio_irq(unsigned int inta_irq,
732 return; 630 return;
733 } 631 }
734 632
735 if (search) { 633 bfin_demux_gpio_block(irq);
736 for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
737 irq += i;
738
739 mask = get_gpiop_data(i) & get_gpiop_maska(i);
740
741 while (mask) {
742 if (mask & 1)
743 bfin_handle_irq(irq);
744 irq++;
745 mask >>= 1;
746 }
747 }
748 } else {
749 gpio = irq_to_gpio(irq);
750 mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
751
752 do {
753 if (mask & 1)
754 bfin_handle_irq(irq);
755 irq++;
756 mask >>= 1;
757 } while (mask);
758 }
759
760} 634}
761 635
762#else /* CONFIG_BF54x */ 636#else /* CONFIG_BF54x */
@@ -974,15 +848,11 @@ static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
974} 848}
975 849
976#ifdef CONFIG_PM 850#ifdef CONFIG_PM
977u32 pint_saved_masks[NR_PINT_SYS_IRQS]; 851static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
978u32 pint_wakeup_masks[NR_PINT_SYS_IRQS];
979
980int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
981{ 852{
982 u32 pint_irq; 853 u32 pint_irq;
983 u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS]; 854 u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
984 u32 bank = PINT_2_BANK(pint_val); 855 u32 bank = PINT_2_BANK(pint_val);
985 u32 pintbit = PINT_BIT(pint_val);
986 856
987 switch (bank) { 857 switch (bank) {
988 case 0: 858 case 0:
@@ -1003,46 +873,14 @@ int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
1003 873
1004 bfin_internal_set_wake(pint_irq, state); 874 bfin_internal_set_wake(pint_irq, state);
1005 875
1006 if (state)
1007 pint_wakeup_masks[bank] |= pintbit;
1008 else
1009 pint_wakeup_masks[bank] &= ~pintbit;
1010
1011 return 0; 876 return 0;
1012} 877}
1013 878#else
1014u32 bfin_pm_setup(void) 879# define bfin_gpio_set_wake NULL
1015{
1016 u32 val, i;
1017
1018 for (i = 0; i < NR_PINT_SYS_IRQS; i++) {
1019 val = pint[i]->mask_clear;
1020 pint_saved_masks[i] = val;
1021 if (val ^ pint_wakeup_masks[i]) {
1022 pint[i]->mask_clear = val;
1023 pint[i]->mask_set = pint_wakeup_masks[i];
1024 }
1025 }
1026
1027 return 0;
1028}
1029
1030void bfin_pm_restore(void)
1031{
1032 u32 i, val;
1033
1034 for (i = 0; i < NR_PINT_SYS_IRQS; i++) {
1035 val = pint_saved_masks[i];
1036 if (val ^ pint_wakeup_masks[i]) {
1037 pint[i]->mask_clear = pint[i]->mask_clear;
1038 pint[i]->mask_set = val;
1039 }
1040 }
1041}
1042#endif 880#endif
1043 881
1044static void bfin_demux_gpio_irq(unsigned int inta_irq, 882void bfin_demux_gpio_irq(unsigned int inta_irq,
1045 struct irq_desc *desc) 883 struct irq_desc *desc)
1046{ 884{
1047 u32 bank, pint_val; 885 u32 bank, pint_val;
1048 u32 request, irq; 886 u32 request, irq;
@@ -1091,9 +929,7 @@ static struct irq_chip bfin_gpio_irqchip = {
1091 .irq_set_type = bfin_gpio_irq_type, 929 .irq_set_type = bfin_gpio_irq_type,
1092 .irq_startup = bfin_gpio_irq_startup, 930 .irq_startup = bfin_gpio_irq_startup,
1093 .irq_shutdown = bfin_gpio_irq_shutdown, 931 .irq_shutdown = bfin_gpio_irq_shutdown,
1094#ifdef CONFIG_PM
1095 .irq_set_wake = bfin_gpio_set_wake, 932 .irq_set_wake = bfin_gpio_set_wake,
1096#endif
1097}; 933};
1098 934
1099void __cpuinit init_exception_vectors(void) 935void __cpuinit init_exception_vectors(void)
@@ -1127,12 +963,12 @@ int __init init_arch_irq(void)
1127{ 963{
1128 int irq; 964 int irq;
1129 unsigned long ilat = 0; 965 unsigned long ilat = 0;
966
1130 /* Disable all the peripheral intrs - page 4-29 HW Ref manual */ 967 /* Disable all the peripheral intrs - page 4-29 HW Ref manual */
1131#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) \ 968#ifdef SIC_IMASK0
1132 || defined(BF538_FAMILY) || defined(CONFIG_BF51x)
1133 bfin_write_SIC_IMASK0(SIC_UNMASK_ALL); 969 bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
1134 bfin_write_SIC_IMASK1(SIC_UNMASK_ALL); 970 bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
1135# ifdef CONFIG_BF54x 971# ifdef SIC_IMASK2
1136 bfin_write_SIC_IMASK2(SIC_UNMASK_ALL); 972 bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
1137# endif 973# endif
1138# ifdef CONFIG_SMP 974# ifdef CONFIG_SMP
@@ -1145,11 +981,6 @@ int __init init_arch_irq(void)
1145 981
1146 local_irq_disable(); 982 local_irq_disable();
1147 983
1148#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
1149 /* Clear EMAC Interrupt Status bits so we can demux it later */
1150 bfin_write_EMAC_SYSTAT(-1);
1151#endif
1152
1153#ifdef CONFIG_BF54x 984#ifdef CONFIG_BF54x
1154# ifdef CONFIG_PINTx_REASSIGN 985# ifdef CONFIG_PINTx_REASSIGN
1155 pint[0]->assign = CONFIG_PINT0_ASSIGN; 986 pint[0]->assign = CONFIG_PINT0_ASSIGN;
@@ -1168,11 +999,11 @@ int __init init_arch_irq(void)
1168 irq_set_chip(irq, &bfin_internal_irqchip); 999 irq_set_chip(irq, &bfin_internal_irqchip);
1169 1000
1170 switch (irq) { 1001 switch (irq) {
1171#if defined(CONFIG_BF53x) 1002#if defined(BF537_FAMILY)
1003 case IRQ_PH_INTA_MAC_RX:
1004 case IRQ_PF_INTA_PG_INTA:
1005#elif defined(BF533_FAMILY)
1172 case IRQ_PROG_INTA: 1006 case IRQ_PROG_INTA:
1173# if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
1174 case IRQ_MAC_RX:
1175# endif
1176#elif defined(CONFIG_BF54x) 1007#elif defined(CONFIG_BF54x)
1177 case IRQ_PINT0: 1008 case IRQ_PINT0:
1178 case IRQ_PINT1: 1009 case IRQ_PINT1:
@@ -1186,16 +1017,11 @@ int __init init_arch_irq(void)
1186 case IRQ_PROG0_INTA: 1017 case IRQ_PROG0_INTA:
1187 case IRQ_PROG1_INTA: 1018 case IRQ_PROG1_INTA:
1188 case IRQ_PROG2_INTA: 1019 case IRQ_PROG2_INTA:
1189#elif defined(CONFIG_BF538) || defined(CONFIG_BF539) 1020#elif defined(BF538_FAMILY)
1190 case IRQ_PORTF_INTA: 1021 case IRQ_PORTF_INTA:
1191#endif 1022#endif
1192 irq_set_chained_handler(irq, bfin_demux_gpio_irq); 1023 irq_set_chained_handler(irq, bfin_demux_gpio_irq);
1193 break; 1024 break;
1194#ifdef BF537_GENERIC_ERROR_INT_DEMUX
1195 case IRQ_GENERIC_ERROR:
1196 irq_set_chained_handler(irq, bfin_demux_error_irq);
1197 break;
1198#endif
1199#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 1025#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
1200 case IRQ_MAC_ERROR: 1026 case IRQ_MAC_ERROR:
1201 irq_set_chained_handler(irq, 1027 irq_set_chained_handler(irq,
@@ -1213,11 +1039,10 @@ int __init init_arch_irq(void)
1213 case IRQ_CORETMR: 1039 case IRQ_CORETMR:
1214# ifdef CONFIG_SMP 1040# ifdef CONFIG_SMP
1215 irq_set_handler(irq, handle_percpu_irq); 1041 irq_set_handler(irq, handle_percpu_irq);
1216 break;
1217# else 1042# else
1218 irq_set_handler(irq, handle_simple_irq); 1043 irq_set_handler(irq, handle_simple_irq);
1219 break;
1220# endif 1044# endif
1045 break;
1221#endif 1046#endif
1222 1047
1223#ifdef CONFIG_TICKSOURCE_GPTMR0 1048#ifdef CONFIG_TICKSOURCE_GPTMR0
@@ -1226,26 +1051,17 @@ int __init init_arch_irq(void)
1226 break; 1051 break;
1227#endif 1052#endif
1228 1053
1229#ifdef CONFIG_IPIPE
1230 default: 1054 default:
1055#ifdef CONFIG_IPIPE
1231 irq_set_handler(irq, handle_level_irq); 1056 irq_set_handler(irq, handle_level_irq);
1232 break; 1057#else
1233#else /* !CONFIG_IPIPE */
1234 default:
1235 irq_set_handler(irq, handle_simple_irq); 1058 irq_set_handler(irq, handle_simple_irq);
1059#endif
1236 break; 1060 break;
1237#endif /* !CONFIG_IPIPE */
1238 } 1061 }
1239 } 1062 }
1240 1063
1241#ifdef BF537_GENERIC_ERROR_INT_DEMUX 1064 init_mach_irq();
1242 for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++)
1243 irq_set_chip_and_handler(irq, &bfin_generic_error_irqchip,
1244 handle_level_irq);
1245#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
1246 irq_set_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq);
1247#endif
1248#endif
1249 1065
1250#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 1066#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
1251 for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++) 1067 for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
@@ -1307,53 +1123,54 @@ int __init init_arch_irq(void)
1307#ifdef CONFIG_DO_IRQ_L1 1123#ifdef CONFIG_DO_IRQ_L1
1308__attribute__((l1_text)) 1124__attribute__((l1_text))
1309#endif 1125#endif
1310void do_irq(int vec, struct pt_regs *fp) 1126static int vec_to_irq(int vec)
1311{ 1127{
1312 if (vec == EVT_IVTMR_P) { 1128 struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
1313 vec = IRQ_CORETMR; 1129 struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
1314 } else { 1130 unsigned long sic_status[3];
1315 struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst; 1131
1316 struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop; 1132 if (likely(vec == EVT_IVTMR_P))
1317#if defined(SIC_ISR0) 1133 return IRQ_CORETMR;
1318 unsigned long sic_status[3];
1319 1134
1320 if (smp_processor_id()) { 1135#ifdef SIC_ISR
1136 sic_status[0] = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
1137#else
1138 if (smp_processor_id()) {
1321# ifdef SICB_ISR0 1139# ifdef SICB_ISR0
1322 /* This will be optimized out in UP mode. */ 1140 /* This will be optimized out in UP mode. */
1323 sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0(); 1141 sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
1324 sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1(); 1142 sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
1325# endif
1326 } else {
1327 sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
1328 sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
1329 }
1330# ifdef SIC_ISR2
1331 sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
1332# endif 1143# endif
1333 for (;; ivg++) { 1144 } else {
1334 if (ivg >= ivg_stop) { 1145 sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
1335 atomic_inc(&num_spurious); 1146 sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
1336 return; 1147 }
1337 } 1148#endif
1338 if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag) 1149#ifdef SIC_ISR2
1339 break; 1150 sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
1340 } 1151#endif
1341#else
1342 unsigned long sic_status;
1343
1344 sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
1345 1152
1346 for (;; ivg++) { 1153 for (;; ivg++) {
1347 if (ivg >= ivg_stop) { 1154 if (ivg >= ivg_stop)
1348 atomic_inc(&num_spurious); 1155 return -1;
1349 return; 1156#ifdef SIC_ISR
1350 } else if (sic_status & ivg->isrflag) 1157 if (sic_status[0] & ivg->isrflag)
1351 break; 1158#else
1352 } 1159 if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
1353#endif 1160#endif
1354 vec = ivg->irqno; 1161 return ivg->irqno;
1355 } 1162 }
1356 asm_do_IRQ(vec, fp); 1163}
1164
1165#ifdef CONFIG_DO_IRQ_L1
1166__attribute__((l1_text))
1167#endif
1168void do_irq(int vec, struct pt_regs *fp)
1169{
1170 int irq = vec_to_irq(vec);
1171 if (irq == -1)
1172 return;
1173 asm_do_IRQ(irq, fp);
1357} 1174}
1358 1175
1359#ifdef CONFIG_IPIPE 1176#ifdef CONFIG_IPIPE
@@ -1391,40 +1208,9 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
1391 struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst; 1208 struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
1392 int irq, s = 0; 1209 int irq, s = 0;
1393 1210
1394 if (likely(vec == EVT_IVTMR_P)) 1211 irq = vec_to_irq(vec);
1395 irq = IRQ_CORETMR; 1212 if (irq == -1)
1396 else { 1213 return 0;
1397#if defined(SIC_ISR0)
1398 unsigned long sic_status[3];
1399
1400 sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
1401 sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
1402# ifdef SIC_ISR2
1403 sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
1404# endif
1405 for (;; ivg++) {
1406 if (ivg >= ivg_stop) {
1407 atomic_inc(&num_spurious);
1408 return 0;
1409 }
1410 if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
1411 break;
1412 }
1413#else
1414 unsigned long sic_status;
1415
1416 sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
1417
1418 for (;; ivg++) {
1419 if (ivg >= ivg_stop) {
1420 atomic_inc(&num_spurious);
1421 return 0;
1422 } else if (sic_status & ivg->isrflag)
1423 break;
1424 }
1425#endif
1426 irq = ivg->irqno;
1427 }
1428 1214
1429 if (irq == IRQ_SYSTMR) { 1215 if (irq == IRQ_SYSTMR) {
1430#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0) 1216#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 1fbd94c44457..35e7e1eb0188 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -25,6 +25,7 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <asm/atomic.h> 26#include <asm/atomic.h>
27#include <asm/cacheflush.h> 27#include <asm/cacheflush.h>
28#include <asm/irq_handler.h>
28#include <asm/mmu_context.h> 29#include <asm/mmu_context.h>
29#include <asm/pgtable.h> 30#include <asm/pgtable.h>
30#include <asm/pgalloc.h> 31#include <asm/pgalloc.h>
@@ -96,7 +97,7 @@ static void ipi_cpu_stop(unsigned int cpu)
96 dump_stack(); 97 dump_stack();
97 spin_unlock(&stop_lock); 98 spin_unlock(&stop_lock);
98 99
99 cpu_clear(cpu, cpu_online_map); 100 set_cpu_online(cpu, false);
100 101
101 local_irq_disable(); 102 local_irq_disable();
102 103
@@ -146,7 +147,7 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
146 */ 147 */
147 resync_core_dcache(); 148 resync_core_dcache();
148#endif 149#endif
149 cpu_clear(cpu, *msg->call_struct.waitmask); 150 cpumask_clear_cpu(cpu, msg->call_struct.waitmask);
150 } 151 }
151} 152}
152 153
@@ -222,9 +223,10 @@ static inline void smp_send_message(cpumask_t callmap, unsigned long type,
222 struct ipi_message_queue *msg_queue; 223 struct ipi_message_queue *msg_queue;
223 struct ipi_message *msg; 224 struct ipi_message *msg;
224 unsigned long flags, next_msg; 225 unsigned long flags, next_msg;
225 cpumask_t waitmask = callmap; /* waitmask is shared by all cpus */ 226 cpumask_t waitmask; /* waitmask is shared by all cpus */
226 227
227 for_each_cpu_mask(cpu, callmap) { 228 cpumask_copy(&waitmask, &callmap);
229 for_each_cpu(cpu, &callmap) {
228 msg_queue = &per_cpu(ipi_msg_queue, cpu); 230 msg_queue = &per_cpu(ipi_msg_queue, cpu);
229 spin_lock_irqsave(&msg_queue->lock, flags); 231 spin_lock_irqsave(&msg_queue->lock, flags);
230 if (msg_queue->count < BFIN_IPI_MSGQ_LEN) { 232 if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
@@ -246,7 +248,7 @@ static inline void smp_send_message(cpumask_t callmap, unsigned long type,
246 } 248 }
247 249
248 if (wait) { 250 if (wait) {
249 while (!cpus_empty(waitmask)) 251 while (!cpumask_empty(&waitmask))
250 blackfin_dcache_invalidate_range( 252 blackfin_dcache_invalidate_range(
251 (unsigned long)(&waitmask), 253 (unsigned long)(&waitmask),
252 (unsigned long)(&waitmask)); 254 (unsigned long)(&waitmask));
@@ -265,9 +267,9 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
265 cpumask_t callmap; 267 cpumask_t callmap;
266 268
267 preempt_disable(); 269 preempt_disable();
268 callmap = cpu_online_map; 270 cpumask_copy(&callmap, cpu_online_mask);
269 cpu_clear(smp_processor_id(), callmap); 271 cpumask_clear_cpu(smp_processor_id(), &callmap);
270 if (!cpus_empty(callmap)) 272 if (!cpumask_empty(&callmap))
271 smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait); 273 smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
272 274
273 preempt_enable(); 275 preempt_enable();
@@ -284,8 +286,8 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
284 286
285 if (cpu_is_offline(cpu)) 287 if (cpu_is_offline(cpu))
286 return 0; 288 return 0;
287 cpus_clear(callmap); 289 cpumask_clear(&callmap);
288 cpu_set(cpu, callmap); 290 cpumask_set_cpu(cpu, &callmap);
289 291
290 smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait); 292 smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
291 293
@@ -308,9 +310,9 @@ void smp_send_stop(void)
308 cpumask_t callmap; 310 cpumask_t callmap;
309 311
310 preempt_disable(); 312 preempt_disable();
311 callmap = cpu_online_map; 313 cpumask_copy(&callmap, cpu_online_mask);
312 cpu_clear(smp_processor_id(), callmap); 314 cpumask_clear_cpu(smp_processor_id(), &callmap);
313 if (!cpus_empty(callmap)) 315 if (!cpumask_empty(&callmap))
314 smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0); 316 smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
315 317
316 preempt_enable(); 318 preempt_enable();
diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c
index dfd304a4a3ea..29d98faa1efd 100644
--- a/arch/blackfin/mm/sram-alloc.c
+++ b/arch/blackfin/mm/sram-alloc.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/poll.h> 16#include <linux/poll.h>
17#include <linux/proc_fs.h> 17#include <linux/proc_fs.h>
18#include <linux/seq_file.h>
18#include <linux/spinlock.h> 19#include <linux/spinlock.h>
19#include <linux/rtc.h> 20#include <linux/rtc.h>
20#include <linux/slab.h> 21#include <linux/slab.h>
@@ -764,7 +765,7 @@ EXPORT_SYMBOL(sram_alloc_with_lsl);
764/* Need to keep line of output the same. Currently, that is 44 bytes 765/* Need to keep line of output the same. Currently, that is 44 bytes
765 * (including newline). 766 * (including newline).
766 */ 767 */
767static int _sram_proc_read(char *buf, int *len, int count, const char *desc, 768static int _sram_proc_show(struct seq_file *m, const char *desc,
768 struct sram_piece *pfree_head, 769 struct sram_piece *pfree_head,
769 struct sram_piece *pused_head) 770 struct sram_piece *pused_head)
770{ 771{
@@ -773,13 +774,13 @@ static int _sram_proc_read(char *buf, int *len, int count, const char *desc,
773 if (!pfree_head || !pused_head) 774 if (!pfree_head || !pused_head)
774 return -1; 775 return -1;
775 776
776 *len += sprintf(&buf[*len], "--- SRAM %-14s Size PID State \n", desc); 777 seq_printf(m, "--- SRAM %-14s Size PID State \n", desc);
777 778
778 /* search the relevant memory slot */ 779 /* search the relevant memory slot */
779 pslot = pused_head->next; 780 pslot = pused_head->next;
780 781
781 while (pslot != NULL) { 782 while (pslot != NULL) {
782 *len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n", 783 seq_printf(m, "%p-%p %10i %5i %-10s\n",
783 pslot->paddr, pslot->paddr + pslot->size, 784 pslot->paddr, pslot->paddr + pslot->size,
784 pslot->size, pslot->pid, "ALLOCATED"); 785 pslot->size, pslot->pid, "ALLOCATED");
785 786
@@ -789,7 +790,7 @@ static int _sram_proc_read(char *buf, int *len, int count, const char *desc,
789 pslot = pfree_head->next; 790 pslot = pfree_head->next;
790 791
791 while (pslot != NULL) { 792 while (pslot != NULL) {
792 *len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n", 793 seq_printf(m, "%p-%p %10i %5i %-10s\n",
793 pslot->paddr, pslot->paddr + pslot->size, 794 pslot->paddr, pslot->paddr + pslot->size,
794 pslot->size, pslot->pid, "FREE"); 795 pslot->size, pslot->pid, "FREE");
795 796
@@ -798,54 +799,62 @@ static int _sram_proc_read(char *buf, int *len, int count, const char *desc,
798 799
799 return 0; 800 return 0;
800} 801}
801static int sram_proc_read(char *buf, char **start, off_t offset, int count, 802static int sram_proc_show(struct seq_file *m, void *v)
802 int *eof, void *data)
803{ 803{
804 int len = 0;
805 unsigned int cpu; 804 unsigned int cpu;
806 805
807 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) { 806 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
808 if (_sram_proc_read(buf, &len, count, "Scratchpad", 807 if (_sram_proc_show(m, "Scratchpad",
809 &per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu))) 808 &per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
810 goto not_done; 809 goto not_done;
811#if L1_DATA_A_LENGTH != 0 810#if L1_DATA_A_LENGTH != 0
812 if (_sram_proc_read(buf, &len, count, "L1 Data A", 811 if (_sram_proc_show(m, "L1 Data A",
813 &per_cpu(free_l1_data_A_sram_head, cpu), 812 &per_cpu(free_l1_data_A_sram_head, cpu),
814 &per_cpu(used_l1_data_A_sram_head, cpu))) 813 &per_cpu(used_l1_data_A_sram_head, cpu)))
815 goto not_done; 814 goto not_done;
816#endif 815#endif
817#if L1_DATA_B_LENGTH != 0 816#if L1_DATA_B_LENGTH != 0
818 if (_sram_proc_read(buf, &len, count, "L1 Data B", 817 if (_sram_proc_show(m, "L1 Data B",
819 &per_cpu(free_l1_data_B_sram_head, cpu), 818 &per_cpu(free_l1_data_B_sram_head, cpu),
820 &per_cpu(used_l1_data_B_sram_head, cpu))) 819 &per_cpu(used_l1_data_B_sram_head, cpu)))
821 goto not_done; 820 goto not_done;
822#endif 821#endif
823#if L1_CODE_LENGTH != 0 822#if L1_CODE_LENGTH != 0
824 if (_sram_proc_read(buf, &len, count, "L1 Instruction", 823 if (_sram_proc_show(m, "L1 Instruction",
825 &per_cpu(free_l1_inst_sram_head, cpu), 824 &per_cpu(free_l1_inst_sram_head, cpu),
826 &per_cpu(used_l1_inst_sram_head, cpu))) 825 &per_cpu(used_l1_inst_sram_head, cpu)))
827 goto not_done; 826 goto not_done;
828#endif 827#endif
829 } 828 }
830#if L2_LENGTH != 0 829#if L2_LENGTH != 0
831 if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head, 830 if (_sram_proc_show(m, "L2", &free_l2_sram_head, &used_l2_sram_head))
832 &used_l2_sram_head))
833 goto not_done; 831 goto not_done;
834#endif 832#endif
835 *eof = 1;
836 not_done: 833 not_done:
837 return len; 834 return 0;
835}
836
837static int sram_proc_open(struct inode *inode, struct file *file)
838{
839 return single_open(file, sram_proc_show, NULL);
838} 840}
839 841
842static const struct file_operations sram_proc_ops = {
843 .open = sram_proc_open,
844 .read = seq_read,
845 .llseek = seq_lseek,
846 .release = single_release,
847};
848
840static int __init sram_proc_init(void) 849static int __init sram_proc_init(void)
841{ 850{
842 struct proc_dir_entry *ptr; 851 struct proc_dir_entry *ptr;
843 ptr = create_proc_entry("sram", S_IFREG | S_IRUGO, NULL); 852
853 ptr = proc_create("sram", S_IRUGO, NULL, &sram_proc_ops);
844 if (!ptr) { 854 if (!ptr) {
845 printk(KERN_WARNING "unable to create /proc/sram\n"); 855 printk(KERN_WARNING "unable to create /proc/sram\n");
846 return -1; 856 return -1;
847 } 857 }
848 ptr->read_proc = sram_proc_read;
849 return 0; 858 return 0;
850} 859}
851late_initcall(sram_proc_init); 860late_initcall(sram_proc_init);
diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c
index 68a1a5901ca5..5ebe6e841820 100644
--- a/arch/cris/arch-v32/kernel/irq.c
+++ b/arch/cris/arch-v32/kernel/irq.c
@@ -266,11 +266,11 @@ static int irq_cpu(int irq)
266 266
267 267
268 /* Let the interrupt stay if possible */ 268 /* Let the interrupt stay if possible */
269 if (cpu_isset(cpu, irq_allocations[irq - FIRST_IRQ].mask)) 269 if (cpumask_test_cpu(cpu, &irq_allocations[irq - FIRST_IRQ].mask))
270 goto out; 270 goto out;
271 271
272 /* IRQ must be moved to another CPU. */ 272 /* IRQ must be moved to another CPU. */
273 cpu = first_cpu(irq_allocations[irq - FIRST_IRQ].mask); 273 cpu = cpumask_first(&irq_allocations[irq - FIRST_IRQ].mask);
274 irq_allocations[irq - FIRST_IRQ].cpu = cpu; 274 irq_allocations[irq - FIRST_IRQ].cpu = cpu;
275out: 275out:
276 spin_unlock_irqrestore(&irq_lock, flags); 276 spin_unlock_irqrestore(&irq_lock, flags);
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index 66cc75657e2f..a0843a71aaee 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -81,7 +81,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
81 81
82 /* Mark all possible CPUs as present */ 82 /* Mark all possible CPUs as present */
83 for (i = 0; i < max_cpus; i++) 83 for (i = 0; i < max_cpus; i++)
84 cpu_set(i, phys_cpu_present_map); 84 cpumask_set_cpu(i, &phys_cpu_present_map);
85} 85}
86 86
87void __devinit smp_prepare_boot_cpu(void) 87void __devinit smp_prepare_boot_cpu(void)
@@ -98,7 +98,7 @@ void __devinit smp_prepare_boot_cpu(void)
98 SUPP_REG_WR(RW_MM_TLB_PGD, pgd); 98 SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
99 99
100 set_cpu_online(0, true); 100 set_cpu_online(0, true);
101 cpu_set(0, phys_cpu_present_map); 101 cpumask_set_cpu(0, &phys_cpu_present_map);
102 set_cpu_possible(0, true); 102 set_cpu_possible(0, true);
103} 103}
104 104
@@ -112,8 +112,9 @@ smp_boot_one_cpu(int cpuid)
112{ 112{
113 unsigned timeout; 113 unsigned timeout;
114 struct task_struct *idle; 114 struct task_struct *idle;
115 cpumask_t cpu_mask = CPU_MASK_NONE; 115 cpumask_t cpu_mask;
116 116
117 cpumask_clear(&cpu_mask);
117 idle = fork_idle(cpuid); 118 idle = fork_idle(cpuid);
118 if (IS_ERR(idle)) 119 if (IS_ERR(idle))
119 panic("SMP: fork failed for CPU:%d", cpuid); 120 panic("SMP: fork failed for CPU:%d", cpuid);
@@ -125,10 +126,10 @@ smp_boot_one_cpu(int cpuid)
125 cpu_now_booting = cpuid; 126 cpu_now_booting = cpuid;
126 127
127 /* Kick it */ 128 /* Kick it */
128 cpu_set(cpuid, cpu_online_map); 129 set_cpu_online(cpuid, true);
129 cpu_set(cpuid, cpu_mask); 130 cpumask_set_cpu(cpuid, &cpu_mask);
130 send_ipi(IPI_BOOT, 0, cpu_mask); 131 send_ipi(IPI_BOOT, 0, cpu_mask);
131 cpu_clear(cpuid, cpu_online_map); 132 set_cpu_online(cpuid, false);
132 133
133 /* Wait for CPU to come online */ 134 /* Wait for CPU to come online */
134 for (timeout = 0; timeout < 10000; timeout++) { 135 for (timeout = 0; timeout < 10000; timeout++) {
@@ -176,7 +177,7 @@ void __init smp_callin(void)
176 notify_cpu_starting(cpu); 177 notify_cpu_starting(cpu);
177 local_irq_enable(); 178 local_irq_enable();
178 179
179 cpu_set(cpu, cpu_online_map); 180 set_cpu_online(cpu, true);
180 cpu_idle(); 181 cpu_idle();
181} 182}
182 183
@@ -214,8 +215,9 @@ int __cpuinit __cpu_up(unsigned int cpu)
214 215
215void smp_send_reschedule(int cpu) 216void smp_send_reschedule(int cpu)
216{ 217{
217 cpumask_t cpu_mask = CPU_MASK_NONE; 218 cpumask_t cpu_mask;
218 cpu_set(cpu, cpu_mask); 219 cpumask_clear(&cpu_mask);
220 cpumask_set_cpu(cpu, &cpu_mask);
219 send_ipi(IPI_SCHEDULE, 0, cpu_mask); 221 send_ipi(IPI_SCHEDULE, 0, cpu_mask);
220} 222}
221 223
@@ -232,7 +234,7 @@ void flush_tlb_common(struct mm_struct* mm, struct vm_area_struct* vma, unsigned
232 234
233 spin_lock_irqsave(&tlbstate_lock, flags); 235 spin_lock_irqsave(&tlbstate_lock, flags);
234 cpu_mask = (mm == FLUSH_ALL ? cpu_all_mask : *mm_cpumask(mm)); 236 cpu_mask = (mm == FLUSH_ALL ? cpu_all_mask : *mm_cpumask(mm));
235 cpu_clear(smp_processor_id(), cpu_mask); 237 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
236 flush_mm = mm; 238 flush_mm = mm;
237 flush_vma = vma; 239 flush_vma = vma;
238 flush_addr = addr; 240 flush_addr = addr;
@@ -277,10 +279,10 @@ int send_ipi(int vector, int wait, cpumask_t cpu_mask)
277 int ret = 0; 279 int ret = 0;
278 280
279 /* Calculate CPUs to send to. */ 281 /* Calculate CPUs to send to. */
280 cpus_and(cpu_mask, cpu_mask, cpu_online_map); 282 cpumask_and(&cpu_mask, &cpu_mask, cpu_online_mask);
281 283
282 /* Send the IPI. */ 284 /* Send the IPI. */
283 for_each_cpu_mask(i, cpu_mask) 285 for_each_cpu(i, &cpu_mask)
284 { 286 {
285 ipi.vector |= vector; 287 ipi.vector |= vector;
286 REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi); 288 REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi);
@@ -288,7 +290,7 @@ int send_ipi(int vector, int wait, cpumask_t cpu_mask)
288 290
289 /* Wait for IPI to finish on other CPUS */ 291 /* Wait for IPI to finish on other CPUS */
290 if (wait) { 292 if (wait) {
291 for_each_cpu_mask(i, cpu_mask) { 293 for_each_cpu(i, &cpu_mask) {
292 int j; 294 int j;
293 for (j = 0 ; j < 1000; j++) { 295 for (j = 0 ; j < 1000; j++) {
294 ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi); 296 ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
@@ -314,11 +316,12 @@ int send_ipi(int vector, int wait, cpumask_t cpu_mask)
314 */ 316 */
315int smp_call_function(void (*func)(void *info), void *info, int wait) 317int smp_call_function(void (*func)(void *info), void *info, int wait)
316{ 318{
317 cpumask_t cpu_mask = CPU_MASK_ALL; 319 cpumask_t cpu_mask;
318 struct call_data_struct data; 320 struct call_data_struct data;
319 int ret; 321 int ret;
320 322
321 cpu_clear(smp_processor_id(), cpu_mask); 323 cpumask_setall(&cpu_mask);
324 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
322 325
323 WARN_ON(irqs_disabled()); 326 WARN_ON(irqs_disabled());
324 327
diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c
index df33ab89d70f..d72ab58fd83e 100644
--- a/arch/cris/mm/init.c
+++ b/arch/cris/mm/init.c
@@ -13,8 +13,6 @@
13#include <linux/bootmem.h> 13#include <linux/bootmem.h>
14#include <asm/tlb.h> 14#include <asm/tlb.h>
15 15
16DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
17
18unsigned long empty_zero_page; 16unsigned long empty_zero_page;
19 17
20extern char _stext, _edata, _etext; /* From linkerscript */ 18extern char _stext, _edata, _etext; /* From linkerscript */
diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c
index ed64588ac3a7..fbe5f0dbae06 100644
--- a/arch/frv/mm/init.c
+++ b/arch/frv/mm/init.c
@@ -41,8 +41,6 @@
41 41
42#undef DEBUG 42#undef DEBUG
43 43
44DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
45
46/* 44/*
47 * BAD_PAGE is the page that is used for page faults when linux 45 * BAD_PAGE is the page that is used for page faults when linux
48 * is out-of-memory. Older versions of linux just did a 46 * is out-of-memory. Older versions of linux just did a
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 23cce999eb1c..c3ffe3e54edc 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -47,21 +47,27 @@
47#include <asm/machvec.h> 47#include <asm/machvec.h>
48 48
49#ifdef CONFIG_SMP 49#ifdef CONFIG_SMP
50# define FREE_PTE_NR 2048
51# define tlb_fast_mode(tlb) ((tlb)->nr == ~0U) 50# define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
52#else 51#else
53# define FREE_PTE_NR 0
54# define tlb_fast_mode(tlb) (1) 52# define tlb_fast_mode(tlb) (1)
55#endif 53#endif
56 54
55/*
56 * If we can't allocate a page to make a big batch of page pointers
57 * to work on, then just handle a few from the on-stack structure.
58 */
59#define IA64_GATHER_BUNDLE 8
60
57struct mmu_gather { 61struct mmu_gather {
58 struct mm_struct *mm; 62 struct mm_struct *mm;
59 unsigned int nr; /* == ~0U => fast mode */ 63 unsigned int nr; /* == ~0U => fast mode */
64 unsigned int max;
60 unsigned char fullmm; /* non-zero means full mm flush */ 65 unsigned char fullmm; /* non-zero means full mm flush */
61 unsigned char need_flush; /* really unmapped some PTEs? */ 66 unsigned char need_flush; /* really unmapped some PTEs? */
62 unsigned long start_addr; 67 unsigned long start_addr;
63 unsigned long end_addr; 68 unsigned long end_addr;
64 struct page *pages[FREE_PTE_NR]; 69 struct page **pages;
70 struct page *local[IA64_GATHER_BUNDLE];
65}; 71};
66 72
67struct ia64_tr_entry { 73struct ia64_tr_entry {
@@ -90,9 +96,6 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
90#define RR_RID_MASK 0x00000000ffffff00L 96#define RR_RID_MASK 0x00000000ffffff00L
91#define RR_TO_RID(val) ((val >> 8) & 0xffffff) 97#define RR_TO_RID(val) ((val >> 8) & 0xffffff)
92 98
93/* Users of the generic TLB shootdown code must declare this storage space. */
94DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
95
96/* 99/*
97 * Flush the TLB for address range START to END and, if not in fast mode, release the 100 * Flush the TLB for address range START to END and, if not in fast mode, release the
98 * freed pages that where gathered up to this point. 101 * freed pages that where gathered up to this point.
@@ -147,15 +150,23 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
147 } 150 }
148} 151}
149 152
150/* 153static inline void __tlb_alloc_page(struct mmu_gather *tlb)
151 * Return a pointer to an initialized struct mmu_gather.
152 */
153static inline struct mmu_gather *
154tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
155{ 154{
156 struct mmu_gather *tlb = &get_cpu_var(mmu_gathers); 155 unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
157 156
157 if (addr) {
158 tlb->pages = (void *)addr;
159 tlb->max = PAGE_SIZE / sizeof(void *);
160 }
161}
162
163
164static inline void
165tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
166{
158 tlb->mm = mm; 167 tlb->mm = mm;
168 tlb->max = ARRAY_SIZE(tlb->local);
169 tlb->pages = tlb->local;
159 /* 170 /*
160 * Use fast mode if only 1 CPU is online. 171 * Use fast mode if only 1 CPU is online.
161 * 172 *
@@ -172,7 +183,6 @@ tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
172 tlb->nr = (num_online_cpus() == 1) ? ~0U : 0; 183 tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
173 tlb->fullmm = full_mm_flush; 184 tlb->fullmm = full_mm_flush;
174 tlb->start_addr = ~0UL; 185 tlb->start_addr = ~0UL;
175 return tlb;
176} 186}
177 187
178/* 188/*
@@ -180,7 +190,7 @@ tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
180 * collected. 190 * collected.
181 */ 191 */
182static inline void 192static inline void
183tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end) 193tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
184{ 194{
185 /* 195 /*
186 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and 196 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
@@ -191,7 +201,8 @@ tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
191 /* keep the page table cache within bounds */ 201 /* keep the page table cache within bounds */
192 check_pgt_cache(); 202 check_pgt_cache();
193 203
194 put_cpu_var(mmu_gathers); 204 if (tlb->pages != tlb->local)
205 free_pages((unsigned long)tlb->pages, 0);
195} 206}
196 207
197/* 208/*
@@ -199,18 +210,33 @@ tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
199 * must be delayed until after the TLB has been flushed (see comments at the beginning of 210 * must be delayed until after the TLB has been flushed (see comments at the beginning of
200 * this file). 211 * this file).
201 */ 212 */
202static inline void 213static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
203tlb_remove_page (struct mmu_gather *tlb, struct page *page)
204{ 214{
205 tlb->need_flush = 1; 215 tlb->need_flush = 1;
206 216
207 if (tlb_fast_mode(tlb)) { 217 if (tlb_fast_mode(tlb)) {
208 free_page_and_swap_cache(page); 218 free_page_and_swap_cache(page);
209 return; 219 return 1; /* avoid calling tlb_flush_mmu */
210 } 220 }
221
222 if (!tlb->nr && tlb->pages == tlb->local)
223 __tlb_alloc_page(tlb);
224
211 tlb->pages[tlb->nr++] = page; 225 tlb->pages[tlb->nr++] = page;
212 if (tlb->nr >= FREE_PTE_NR) 226 VM_BUG_ON(tlb->nr > tlb->max);
213 ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr); 227
228 return tlb->max - tlb->nr;
229}
230
231static inline void tlb_flush_mmu(struct mmu_gather *tlb)
232{
233 ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
234}
235
236static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
237{
238 if (!__tlb_remove_page(tlb, page))
239 tlb_flush_mmu(tlb);
214} 240}
215 241
216/* 242/*
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 9a018cde5d84..f114a3b14c6a 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -44,13 +44,16 @@ void show_mem(unsigned int filter)
44 pg_data_t *pgdat; 44 pg_data_t *pgdat;
45 45
46 printk(KERN_INFO "Mem-info:\n"); 46 printk(KERN_INFO "Mem-info:\n");
47 show_free_areas(); 47 show_free_areas(filter);
48 printk(KERN_INFO "Node memory in pages:\n"); 48 printk(KERN_INFO "Node memory in pages:\n");
49 for_each_online_pgdat(pgdat) { 49 for_each_online_pgdat(pgdat) {
50 unsigned long present; 50 unsigned long present;
51 unsigned long flags; 51 unsigned long flags;
52 int shared = 0, cached = 0, reserved = 0; 52 int shared = 0, cached = 0, reserved = 0;
53 int nid = pgdat->node_id;
53 54
55 if (skip_free_areas_node(filter, nid))
56 continue;
54 pgdat_resize_lock(pgdat, &flags); 57 pgdat_resize_lock(pgdat, &flags);
55 present = pgdat->node_present_pages; 58 present = pgdat->node_present_pages;
56 for(i = 0; i < pgdat->node_spanned_pages; i++) { 59 for(i = 0; i < pgdat->node_spanned_pages; i++) {
@@ -64,8 +67,7 @@ void show_mem(unsigned int filter)
64 if (max_gap < LARGE_GAP) 67 if (max_gap < LARGE_GAP)
65 continue; 68 continue;
66#endif 69#endif
67 i = vmemmap_find_next_valid_pfn(pgdat->node_id, 70 i = vmemmap_find_next_valid_pfn(nid, i) - 1;
68 i) - 1;
69 continue; 71 continue;
70 } 72 }
71 if (PageReserved(page)) 73 if (PageReserved(page))
@@ -81,7 +83,7 @@ void show_mem(unsigned int filter)
81 total_cached += cached; 83 total_cached += cached;
82 total_shared += shared; 84 total_shared += shared;
83 printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, " 85 printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, "
84 "shrd: %10d, swpd: %10d\n", pgdat->node_id, 86 "shrd: %10d, swpd: %10d\n", nid,
85 present, reserved, shared, cached); 87 present, reserved, shared, cached);
86 } 88 }
87 printk(KERN_INFO "%ld pages of RAM\n", total_present); 89 printk(KERN_INFO "%ld pages of RAM\n", total_present);
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 82ab1bc6afb1..c641333cd997 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -622,13 +622,16 @@ void show_mem(unsigned int filter)
622 pg_data_t *pgdat; 622 pg_data_t *pgdat;
623 623
624 printk(KERN_INFO "Mem-info:\n"); 624 printk(KERN_INFO "Mem-info:\n");
625 show_free_areas(); 625 show_free_areas(filter);
626 printk(KERN_INFO "Node memory in pages:\n"); 626 printk(KERN_INFO "Node memory in pages:\n");
627 for_each_online_pgdat(pgdat) { 627 for_each_online_pgdat(pgdat) {
628 unsigned long present; 628 unsigned long present;
629 unsigned long flags; 629 unsigned long flags;
630 int shared = 0, cached = 0, reserved = 0; 630 int shared = 0, cached = 0, reserved = 0;
631 int nid = pgdat->node_id;
631 632
633 if (skip_free_areas_node(filter, nid))
634 continue;
632 pgdat_resize_lock(pgdat, &flags); 635 pgdat_resize_lock(pgdat, &flags);
633 present = pgdat->node_present_pages; 636 present = pgdat->node_present_pages;
634 for(i = 0; i < pgdat->node_spanned_pages; i++) { 637 for(i = 0; i < pgdat->node_spanned_pages; i++) {
@@ -638,8 +641,7 @@ void show_mem(unsigned int filter)
638 if (pfn_valid(pgdat->node_start_pfn + i)) 641 if (pfn_valid(pgdat->node_start_pfn + i))
639 page = pfn_to_page(pgdat->node_start_pfn + i); 642 page = pfn_to_page(pgdat->node_start_pfn + i);
640 else { 643 else {
641 i = vmemmap_find_next_valid_pfn(pgdat->node_id, 644 i = vmemmap_find_next_valid_pfn(nid, i) - 1;
642 i) - 1;
643 continue; 645 continue;
644 } 646 }
645 if (PageReserved(page)) 647 if (PageReserved(page))
@@ -655,7 +657,7 @@ void show_mem(unsigned int filter)
655 total_cached += cached; 657 total_cached += cached;
656 total_shared += shared; 658 total_shared += shared;
657 printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, " 659 printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, "
658 "shrd: %10d, swpd: %10d\n", pgdat->node_id, 660 "shrd: %10d, swpd: %10d\n", nid,
659 present, reserved, shared, cached); 661 present, reserved, shared, cached);
660 } 662 }
661 printk(KERN_INFO "%ld pages of RAM\n", total_present); 663 printk(KERN_INFO "%ld pages of RAM\n", total_present);
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ed41759efcac..00cb0e26c64e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -36,8 +36,6 @@
36#include <asm/mca.h> 36#include <asm/mca.h>
37#include <asm/paravirt.h> 37#include <asm/paravirt.h>
38 38
39DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
40
41extern void ia64_tlb_init (void); 39extern void ia64_tlb_init (void);
42 40
43unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL; 41unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
diff --git a/arch/m32r/Kconfig.debug b/arch/m32r/Kconfig.debug
index 2e1019ddbb22..bb1afc1a31cc 100644
--- a/arch/m32r/Kconfig.debug
+++ b/arch/m32r/Kconfig.debug
@@ -9,15 +9,6 @@ config DEBUG_STACKOVERFLOW
9 This option will cause messages to be printed if free stack space 9 This option will cause messages to be printed if free stack space
10 drops below a certain limit. 10 drops below a certain limit.
11 11
12config DEBUG_STACK_USAGE
13 bool "Stack utilization instrumentation"
14 depends on DEBUG_KERNEL
15 help
16 Enables the display of the minimum amount of free stack which each
17 task has ever had available in the sysrq-T and sysrq-P debug output.
18
19 This option will slow down process creation somewhat.
20
21config DEBUG_PAGEALLOC 12config DEBUG_PAGEALLOC
22 bool "Debug page memory allocations" 13 bool "Debug page memory allocations"
23 depends on DEBUG_KERNEL && BROKEN 14 depends on DEBUG_KERNEL && BROKEN
diff --git a/arch/m32r/include/asm/smp.h b/arch/m32r/include/asm/smp.h
index e67ded1aab91..8accc1bb0263 100644
--- a/arch/m32r/include/asm/smp.h
+++ b/arch/m32r/include/asm/smp.h
@@ -94,8 +94,6 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
94 94
95#define NO_PROC_ID (0xff) /* No processor magic marker */ 95#define NO_PROC_ID (0xff) /* No processor magic marker */
96 96
97#define PROC_CHANGE_PENALTY (15) /* Schedule penalty */
98
99/* 97/*
100 * M32R-mp IPI 98 * M32R-mp IPI
101 */ 99 */
diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c
index 5d2858f6eede..2c468e8b5853 100644
--- a/arch/m32r/mm/discontig.c
+++ b/arch/m32r/mm/discontig.c
@@ -149,6 +149,7 @@ unsigned long __init zone_sizes_init(void)
149 zholes_size[ZONE_DMA] = mp->holes; 149 zholes_size[ZONE_DMA] = mp->holes;
150 holes += zholes_size[ZONE_DMA]; 150 holes += zholes_size[ZONE_DMA];
151 151
152 node_set_state(nid, N_NORMAL_MEMORY);
152 free_area_init_node(nid, zones_size, start_pfn, zholes_size); 153 free_area_init_node(nid, zones_size, start_pfn, zholes_size);
153 } 154 }
154 155
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index 73e2205ebf5a..78b660e903da 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -35,8 +35,6 @@ extern char __init_begin, __init_end;
35 35
36pgd_t swapper_pg_dir[1024]; 36pgd_t swapper_pg_dir[1024];
37 37
38DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
39
40/* 38/*
41 * Cache of MMU context last used. 39 * Cache of MMU context last used.
42 */ 40 */
diff --git a/arch/m68k/mm/init_mm.c b/arch/m68k/mm/init_mm.c
index 8bc842554e5b..9113c2f17607 100644
--- a/arch/m68k/mm/init_mm.c
+++ b/arch/m68k/mm/init_mm.c
@@ -32,8 +32,6 @@
32#include <asm/sections.h> 32#include <asm/sections.h>
33#include <asm/tlb.h> 33#include <asm/tlb.h>
34 34
35DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
36
37pg_data_t pg_data_map[MAX_NUMNODES]; 35pg_data_t pg_data_map[MAX_NUMNODES];
38EXPORT_SYMBOL(pg_data_map); 36EXPORT_SYMBOL(pg_data_map);
39 37
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index c8437866d3b7..213f2d671669 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -32,8 +32,6 @@ unsigned int __page_offset;
32EXPORT_SYMBOL(__page_offset); 32EXPORT_SYMBOL(__page_offset);
33 33
34#else 34#else
35DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
36
37static int init_bootmem_done; 35static int init_bootmem_done;
38#endif /* CONFIG_MMU */ 36#endif /* CONFIG_MMU */
39 37
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 5358f90b4dd2..83ed00a5644a 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -76,15 +76,6 @@ config DEBUG_STACKOVERFLOW
76 provides another way to check stack overflow happened on kernel mode 76 provides another way to check stack overflow happened on kernel mode
77 stack usually caused by nested interruption. 77 stack usually caused by nested interruption.
78 78
79config DEBUG_STACK_USAGE
80 bool "Enable stack utilization instrumentation"
81 depends on DEBUG_KERNEL
82 help
83 Enables the display of the minimum amount of free stack which each
84 task has ever had available in the sysrq-T and sysrq-P debug output.
85
86 This option will slow down process creation somewhat.
87
88config SMTC_IDLE_HOOK_DEBUG 79config SMTC_IDLE_HOOK_DEBUG
89 bool "Enable additional debug checks before going into CPU idle loop" 80 bool "Enable additional debug checks before going into CPU idle loop"
90 depends on DEBUG_KERNEL && MIPS_MT_SMTC 81 depends on DEBUG_KERNEL && MIPS_MT_SMTC
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 279599e9a779..1aadeb42c5a5 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -64,8 +64,6 @@
64 64
65#endif /* CONFIG_MIPS_MT_SMTC */ 65#endif /* CONFIG_MIPS_MT_SMTC */
66 66
67DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
68
69/* 67/*
70 * We have up to 8 empty zeroed pages so we can map one of the right colour 68 * We have up to 8 empty zeroed pages so we can map one of the right colour
71 * when needed. This is necessary only on R4000 / R4400 SC and MC versions 69 * when needed. This is necessary only on R4000 / R4400 SC and MC versions
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index 86af0d7d0771..2623d19f4f4c 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -87,7 +87,7 @@ static void mn10300_cpupic_mask_ack(struct irq_data *d)
87 tmp2 = GxICR(irq); 87 tmp2 = GxICR(irq);
88 88
89 irq_affinity_online[irq] = 89 irq_affinity_online[irq] =
90 any_online_cpu(*d->affinity); 90 cpumask_any_and(d->affinity, cpu_online_mask);
91 CROSS_GxICR(irq, irq_affinity_online[irq]) = 91 CROSS_GxICR(irq, irq_affinity_online[irq]) =
92 (tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT; 92 (tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
93 tmp = CROSS_GxICR(irq, irq_affinity_online[irq]); 93 tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
@@ -124,7 +124,8 @@ static void mn10300_cpupic_unmask_clear(struct irq_data *d)
124 } else { 124 } else {
125 tmp = GxICR(irq); 125 tmp = GxICR(irq);
126 126
127 irq_affinity_online[irq] = any_online_cpu(*d->affinity); 127 irq_affinity_online[irq] = cpumask_any_and(d->affinity,
128 cpu_online_mask);
128 CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT; 129 CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
129 tmp = CROSS_GxICR(irq, irq_affinity_online[irq]); 130 tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
130 } 131 }
@@ -366,11 +367,11 @@ void migrate_irqs(void)
366 if (irqd_is_per_cpu(data)) 367 if (irqd_is_per_cpu(data))
367 continue; 368 continue;
368 369
369 if (cpu_isset(self, data->affinity) && 370 if (cpumask_test_cpu(self, &data->affinity) &&
370 !cpus_intersects(irq_affinity[irq], cpu_online_map)) { 371 !cpumask_intersects(&irq_affinity[irq], cpu_online_mask)) {
371 int cpu_id; 372 int cpu_id;
372 cpu_id = first_cpu(cpu_online_map); 373 cpu_id = cpumask_first(cpu_online_mask);
373 cpu_set(cpu_id, data->affinity); 374 cpumask_set_cpu(cpu_id, &data->affinity);
374 } 375 }
375 /* We need to operate irq_affinity_online atomically. */ 376 /* We need to operate irq_affinity_online atomically. */
376 arch_local_cli_save(flags); 377 arch_local_cli_save(flags);
@@ -381,7 +382,8 @@ void migrate_irqs(void)
381 GxICR(irq) = x & GxICR_LEVEL; 382 GxICR(irq) = x & GxICR_LEVEL;
382 tmp = GxICR(irq); 383 tmp = GxICR(irq);
383 384
384 new = any_online_cpu(data->affinity); 385 new = cpumask_any_and(&data->affinity,
386 cpu_online_mask);
385 irq_affinity_online[irq] = new; 387 irq_affinity_online[irq] = new;
386 388
387 CROSS_GxICR(irq, new) = 389 CROSS_GxICR(irq, new) =
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
index 83fb27912231..9242e9fcc564 100644
--- a/arch/mn10300/kernel/smp.c
+++ b/arch/mn10300/kernel/smp.c
@@ -309,7 +309,7 @@ static void send_IPI_mask(const cpumask_t *cpumask, int irq)
309 u16 tmp; 309 u16 tmp;
310 310
311 for (i = 0; i < NR_CPUS; i++) { 311 for (i = 0; i < NR_CPUS; i++) {
312 if (cpu_isset(i, *cpumask)) { 312 if (cpumask_test_cpu(i, cpumask)) {
313 /* send IPI */ 313 /* send IPI */
314 tmp = CROSS_GxICR(irq, i); 314 tmp = CROSS_GxICR(irq, i);
315 CROSS_GxICR(irq, i) = 315 CROSS_GxICR(irq, i) =
@@ -342,8 +342,8 @@ void send_IPI_allbutself(int irq)
342{ 342{
343 cpumask_t cpumask; 343 cpumask_t cpumask;
344 344
345 cpumask = cpu_online_map; 345 cpumask_copy(&cpumask, cpu_online_mask);
346 cpu_clear(smp_processor_id(), cpumask); 346 cpumask_clear_cpu(smp_processor_id(), &cpumask);
347 send_IPI_mask(&cpumask, irq); 347 send_IPI_mask(&cpumask, irq);
348} 348}
349 349
@@ -393,8 +393,8 @@ int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
393 393
394 data.func = func; 394 data.func = func;
395 data.info = info; 395 data.info = info;
396 data.started = cpu_online_map; 396 cpumask_copy(&data.started, cpu_online_mask);
397 cpu_clear(smp_processor_id(), data.started); 397 cpumask_clear_cpu(smp_processor_id(), &data.started);
398 data.wait = wait; 398 data.wait = wait;
399 if (wait) 399 if (wait)
400 data.finished = data.started; 400 data.finished = data.started;
@@ -410,14 +410,14 @@ int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
410 if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) { 410 if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) {
411 for (cnt = 0; 411 for (cnt = 0;
412 cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT && 412 cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
413 !cpus_empty(data.started); 413 !cpumask_empty(&data.started);
414 cnt++) 414 cnt++)
415 mdelay(1); 415 mdelay(1);
416 416
417 if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) { 417 if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) {
418 for (cnt = 0; 418 for (cnt = 0;
419 cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT && 419 cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
420 !cpus_empty(data.finished); 420 !cpumask_empty(&data.finished);
421 cnt++) 421 cnt++)
422 mdelay(1); 422 mdelay(1);
423 } 423 }
@@ -428,10 +428,10 @@ int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
428 } else { 428 } else {
429 /* If timeout value is zero, wait until cpumask has been 429 /* If timeout value is zero, wait until cpumask has been
430 * cleared */ 430 * cleared */
431 while (!cpus_empty(data.started)) 431 while (!cpumask_empty(&data.started))
432 barrier(); 432 barrier();
433 if (wait) 433 if (wait)
434 while (!cpus_empty(data.finished)) 434 while (!cpumask_empty(&data.finished))
435 barrier(); 435 barrier();
436 } 436 }
437 437
@@ -472,12 +472,12 @@ void stop_this_cpu(void *unused)
472#endif /* CONFIG_GDBSTUB */ 472#endif /* CONFIG_GDBSTUB */
473 473
474 flags = arch_local_cli_save(); 474 flags = arch_local_cli_save();
475 cpu_clear(smp_processor_id(), cpu_online_map); 475 set_cpu_online(smp_processor_id(), false);
476 476
477 while (!stopflag) 477 while (!stopflag)
478 cpu_relax(); 478 cpu_relax();
479 479
480 cpu_set(smp_processor_id(), cpu_online_map); 480 set_cpu_online(smp_processor_id(), true);
481 arch_local_irq_restore(flags); 481 arch_local_irq_restore(flags);
482} 482}
483 483
@@ -529,12 +529,13 @@ void smp_nmi_call_function_interrupt(void)
529 * execute the function 529 * execute the function
530 */ 530 */
531 smp_mb(); 531 smp_mb();
532 cpu_clear(smp_processor_id(), nmi_call_data->started); 532 cpumask_clear_cpu(smp_processor_id(), &nmi_call_data->started);
533 (*func)(info); 533 (*func)(info);
534 534
535 if (wait) { 535 if (wait) {
536 smp_mb(); 536 smp_mb();
537 cpu_clear(smp_processor_id(), nmi_call_data->finished); 537 cpumask_clear_cpu(smp_processor_id(),
538 &nmi_call_data->finished);
538 } 539 }
539} 540}
540 541
@@ -657,7 +658,7 @@ int __init start_secondary(void *unused)
657{ 658{
658 smp_cpu_init(); 659 smp_cpu_init();
659 smp_callin(); 660 smp_callin();
660 while (!cpu_isset(smp_processor_id(), smp_commenced_mask)) 661 while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
661 cpu_relax(); 662 cpu_relax();
662 663
663 local_flush_tlb(); 664 local_flush_tlb();
@@ -780,13 +781,14 @@ static int __init do_boot_cpu(int phy_id)
780 781
781 if (send_status == 0) { 782 if (send_status == 0) {
782 /* Allow AP to start initializing */ 783 /* Allow AP to start initializing */
783 cpu_set(cpu_id, cpu_callout_map); 784 cpumask_set_cpu(cpu_id, &cpu_callout_map);
784 785
785 /* Wait for setting cpu_callin_map */ 786 /* Wait for setting cpu_callin_map */
786 timeout = 0; 787 timeout = 0;
787 do { 788 do {
788 udelay(1000); 789 udelay(1000);
789 callin_status = cpu_isset(cpu_id, cpu_callin_map); 790 callin_status = cpumask_test_cpu(cpu_id,
791 &cpu_callin_map);
790 } while (callin_status == 0 && timeout++ < 5000); 792 } while (callin_status == 0 && timeout++ < 5000);
791 793
792 if (callin_status == 0) 794 if (callin_status == 0)
@@ -796,9 +798,9 @@ static int __init do_boot_cpu(int phy_id)
796 } 798 }
797 799
798 if (send_status == GxICR_REQUEST || callin_status == 0) { 800 if (send_status == GxICR_REQUEST || callin_status == 0) {
799 cpu_clear(cpu_id, cpu_callout_map); 801 cpumask_clear_cpu(cpu_id, &cpu_callout_map);
800 cpu_clear(cpu_id, cpu_callin_map); 802 cpumask_clear_cpu(cpu_id, &cpu_callin_map);
801 cpu_clear(cpu_id, cpu_initialized); 803 cpumask_clear_cpu(cpu_id, &cpu_initialized);
802 cpucount--; 804 cpucount--;
803 return 1; 805 return 1;
804 } 806 }
@@ -833,7 +835,7 @@ static void __init smp_callin(void)
833 cpu = smp_processor_id(); 835 cpu = smp_processor_id();
834 timeout = jiffies + (2 * HZ); 836 timeout = jiffies + (2 * HZ);
835 837
836 if (cpu_isset(cpu, cpu_callin_map)) { 838 if (cpumask_test_cpu(cpu, &cpu_callin_map)) {
837 printk(KERN_ERR "CPU#%d already present.\n", cpu); 839 printk(KERN_ERR "CPU#%d already present.\n", cpu);
838 BUG(); 840 BUG();
839 } 841 }
@@ -841,7 +843,7 @@ static void __init smp_callin(void)
841 843
842 /* Wait for AP startup 2s total */ 844 /* Wait for AP startup 2s total */
843 while (time_before(jiffies, timeout)) { 845 while (time_before(jiffies, timeout)) {
844 if (cpu_isset(cpu, cpu_callout_map)) 846 if (cpumask_test_cpu(cpu, &cpu_callout_map))
845 break; 847 break;
846 cpu_relax(); 848 cpu_relax();
847 } 849 }
@@ -861,11 +863,11 @@ static void __init smp_callin(void)
861 smp_store_cpu_info(cpu); 863 smp_store_cpu_info(cpu);
862 864
863 /* Allow the boot processor to continue */ 865 /* Allow the boot processor to continue */
864 cpu_set(cpu, cpu_callin_map); 866 cpumask_set_cpu(cpu, &cpu_callin_map);
865} 867}
866 868
867/** 869/**
868 * smp_online - Set cpu_online_map 870 * smp_online - Set cpu_online_mask
869 */ 871 */
870static void __init smp_online(void) 872static void __init smp_online(void)
871{ 873{
@@ -875,7 +877,7 @@ static void __init smp_online(void)
875 877
876 local_irq_enable(); 878 local_irq_enable();
877 879
878 cpu_set(cpu, cpu_online_map); 880 set_cpu_online(cpu, true);
879 smp_wmb(); 881 smp_wmb();
880} 882}
881 883
@@ -892,13 +894,13 @@ void __init smp_cpus_done(unsigned int max_cpus)
892/* 894/*
893 * smp_prepare_boot_cpu - Set up stuff for the boot processor. 895 * smp_prepare_boot_cpu - Set up stuff for the boot processor.
894 * 896 *
895 * Set up the cpu_online_map, cpu_callout_map and cpu_callin_map of the boot 897 * Set up the cpu_online_mask, cpu_callout_map and cpu_callin_map of the boot
896 * processor (CPU 0). 898 * processor (CPU 0).
897 */ 899 */
898void __devinit smp_prepare_boot_cpu(void) 900void __devinit smp_prepare_boot_cpu(void)
899{ 901{
900 cpu_set(0, cpu_callout_map); 902 cpumask_set_cpu(0, &cpu_callout_map);
901 cpu_set(0, cpu_callin_map); 903 cpumask_set_cpu(0, &cpu_callin_map);
902 current_thread_info()->cpu = 0; 904 current_thread_info()->cpu = 0;
903} 905}
904 906
@@ -931,16 +933,16 @@ int __devinit __cpu_up(unsigned int cpu)
931 run_wakeup_cpu(cpu); 933 run_wakeup_cpu(cpu);
932#endif /* CONFIG_HOTPLUG_CPU */ 934#endif /* CONFIG_HOTPLUG_CPU */
933 935
934 cpu_set(cpu, smp_commenced_mask); 936 cpumask_set_cpu(cpu, &smp_commenced_mask);
935 937
936 /* Wait 5s total for a response */ 938 /* Wait 5s total for a response */
937 for (timeout = 0 ; timeout < 5000 ; timeout++) { 939 for (timeout = 0 ; timeout < 5000 ; timeout++) {
938 if (cpu_isset(cpu, cpu_online_map)) 940 if (cpu_online(cpu))
939 break; 941 break;
940 udelay(1000); 942 udelay(1000);
941 } 943 }
942 944
943 BUG_ON(!cpu_isset(cpu, cpu_online_map)); 945 BUG_ON(!cpu_online(cpu));
944 return 0; 946 return 0;
945} 947}
946 948
@@ -986,7 +988,7 @@ int __cpu_disable(void)
986 return -EBUSY; 988 return -EBUSY;
987 989
988 migrate_irqs(); 990 migrate_irqs();
989 cpu_clear(cpu, current->active_mm->cpu_vm_mask); 991 cpumask_clear_cpu(cpu, &mm_cpumask(current->active_mm));
990 return 0; 992 return 0;
991} 993}
992 994
@@ -1091,13 +1093,13 @@ static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
1091 do { 1093 do {
1092 mn10300_local_dcache_inv_range(start, end); 1094 mn10300_local_dcache_inv_range(start, end);
1093 barrier(); 1095 barrier();
1094 } while (!cpus_empty(nmi_call_func_mask_data.started)); 1096 } while (!cpumask_empty(&nmi_call_func_mask_data.started));
1095 1097
1096 if (wait) { 1098 if (wait) {
1097 do { 1099 do {
1098 mn10300_local_dcache_inv_range(start, end); 1100 mn10300_local_dcache_inv_range(start, end);
1099 barrier(); 1101 barrier();
1100 } while (!cpus_empty(nmi_call_func_mask_data.finished)); 1102 } while (!cpumask_empty(&nmi_call_func_mask_data.finished));
1101 } 1103 }
1102 1104
1103 spin_unlock(&smp_nmi_call_lock); 1105 spin_unlock(&smp_nmi_call_lock);
@@ -1108,9 +1110,9 @@ static void restart_wakeup_cpu(void)
1108{ 1110{
1109 unsigned int cpu = smp_processor_id(); 1111 unsigned int cpu = smp_processor_id();
1110 1112
1111 cpu_set(cpu, cpu_callin_map); 1113 cpumask_set_cpu(cpu, &cpu_callin_map);
1112 local_flush_tlb(); 1114 local_flush_tlb();
1113 cpu_set(cpu, cpu_online_map); 1115 set_cpu_online(cpu, true);
1114 smp_wmb(); 1116 smp_wmb();
1115} 1117}
1116 1118
@@ -1141,8 +1143,9 @@ static void sleep_cpu(void *unused)
1141static void run_sleep_cpu(unsigned int cpu) 1143static void run_sleep_cpu(unsigned int cpu)
1142{ 1144{
1143 unsigned long flags; 1145 unsigned long flags;
1144 cpumask_t cpumask = cpumask_of(cpu); 1146 cpumask_t cpumask;
1145 1147
1148 cpumask_copy(&cpumask, &cpumask_of(cpu));
1146 flags = arch_local_cli_save(); 1149 flags = arch_local_cli_save();
1147 hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1); 1150 hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
1148 hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0); 1151 hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
diff --git a/arch/mn10300/mm/cache-smp.c b/arch/mn10300/mm/cache-smp.c
index 4a6e9a4b5b27..2d23b9eeee62 100644
--- a/arch/mn10300/mm/cache-smp.c
+++ b/arch/mn10300/mm/cache-smp.c
@@ -74,7 +74,7 @@ void smp_cache_interrupt(void)
74 break; 74 break;
75 } 75 }
76 76
77 cpu_clear(smp_processor_id(), smp_cache_ipi_map); 77 cpumask_clear_cpu(smp_processor_id(), &smp_cache_ipi_map);
78} 78}
79 79
80/** 80/**
@@ -94,12 +94,12 @@ void smp_cache_call(unsigned long opr_mask,
94 smp_cache_mask = opr_mask; 94 smp_cache_mask = opr_mask;
95 smp_cache_start = start; 95 smp_cache_start = start;
96 smp_cache_end = end; 96 smp_cache_end = end;
97 smp_cache_ipi_map = cpu_online_map; 97 cpumask_copy(&smp_cache_ipi_map, cpu_online_mask);
98 cpu_clear(smp_processor_id(), smp_cache_ipi_map); 98 cpumask_clear_cpu(smp_processor_id(), &smp_cache_ipi_map);
99 99
100 send_IPI_allbutself(FLUSH_CACHE_IPI); 100 send_IPI_allbutself(FLUSH_CACHE_IPI);
101 101
102 while (!cpus_empty(smp_cache_ipi_map)) 102 while (!cpumask_empty(&smp_cache_ipi_map))
103 /* nothing. lockup detection does not belong here */ 103 /* nothing. lockup detection does not belong here */
104 mb(); 104 mb();
105} 105}
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
index 48907cc3bdb7..13801824e3ee 100644
--- a/arch/mn10300/mm/init.c
+++ b/arch/mn10300/mm/init.c
@@ -37,8 +37,6 @@
37#include <asm/tlb.h> 37#include <asm/tlb.h>
38#include <asm/sections.h> 38#include <asm/sections.h>
39 39
40DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
41
42unsigned long highstart_pfn, highend_pfn; 40unsigned long highstart_pfn, highend_pfn;
43 41
44#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT 42#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
index 0b6a5ad1960e..9a777498a916 100644
--- a/arch/mn10300/mm/tlb-smp.c
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -64,7 +64,7 @@ void smp_flush_tlb(void *unused)
64 64
65 cpu_id = get_cpu(); 65 cpu_id = get_cpu();
66 66
67 if (!cpu_isset(cpu_id, flush_cpumask)) 67 if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
68 /* This was a BUG() but until someone can quote me the line 68 /* This was a BUG() but until someone can quote me the line
69 * from the intel manual that guarantees an IPI to multiple 69 * from the intel manual that guarantees an IPI to multiple
70 * CPUs is retried _only_ on the erroring CPUs its staying as a 70 * CPUs is retried _only_ on the erroring CPUs its staying as a
@@ -80,7 +80,7 @@ void smp_flush_tlb(void *unused)
80 local_flush_tlb_page(flush_mm, flush_va); 80 local_flush_tlb_page(flush_mm, flush_va);
81 81
82 smp_mb__before_clear_bit(); 82 smp_mb__before_clear_bit();
83 cpu_clear(cpu_id, flush_cpumask); 83 cpumask_clear_cpu(cpu_id, &flush_cpumask);
84 smp_mb__after_clear_bit(); 84 smp_mb__after_clear_bit();
85out: 85out:
86 put_cpu(); 86 put_cpu();
@@ -103,11 +103,11 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
103 * - we do not send IPIs to as-yet unbooted CPUs. 103 * - we do not send IPIs to as-yet unbooted CPUs.
104 */ 104 */
105 BUG_ON(!mm); 105 BUG_ON(!mm);
106 BUG_ON(cpus_empty(cpumask)); 106 BUG_ON(cpumask_empty(&cpumask));
107 BUG_ON(cpu_isset(smp_processor_id(), cpumask)); 107 BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
108 108
109 cpus_and(tmp, cpumask, cpu_online_map); 109 cpumask_and(&tmp, &cpumask, cpu_online_mask);
110 BUG_ON(!cpus_equal(cpumask, tmp)); 110 BUG_ON(!cpumask_equal(&cpumask, &tmp));
111 111
112 /* I'm not happy about this global shared spinlock in the MM hot path, 112 /* I'm not happy about this global shared spinlock in the MM hot path,
113 * but we'll see how contended it is. 113 * but we'll see how contended it is.
@@ -128,7 +128,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
128 /* FIXME: if NR_CPUS>=3, change send_IPI_mask */ 128 /* FIXME: if NR_CPUS>=3, change send_IPI_mask */
129 smp_call_function(smp_flush_tlb, NULL, 1); 129 smp_call_function(smp_flush_tlb, NULL, 1);
130 130
131 while (!cpus_empty(flush_cpumask)) 131 while (!cpumask_empty(&flush_cpumask))
132 /* Lockup detection does not belong here */ 132 /* Lockup detection does not belong here */
133 smp_mb(); 133 smp_mb();
134 134
@@ -146,11 +146,11 @@ void flush_tlb_mm(struct mm_struct *mm)
146 cpumask_t cpu_mask; 146 cpumask_t cpu_mask;
147 147
148 preempt_disable(); 148 preempt_disable();
149 cpu_mask = mm->cpu_vm_mask; 149 cpumask_copy(&cpu_mask, mm_cpumask(mm));
150 cpu_clear(smp_processor_id(), cpu_mask); 150 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
151 151
152 local_flush_tlb(); 152 local_flush_tlb();
153 if (!cpus_empty(cpu_mask)) 153 if (!cpumask_empty(&cpu_mask))
154 flush_tlb_others(cpu_mask, mm, FLUSH_ALL); 154 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
155 155
156 preempt_enable(); 156 preempt_enable();
@@ -165,11 +165,11 @@ void flush_tlb_current_task(void)
165 cpumask_t cpu_mask; 165 cpumask_t cpu_mask;
166 166
167 preempt_disable(); 167 preempt_disable();
168 cpu_mask = mm->cpu_vm_mask; 168 cpumask_copy(&cpu_mask, mm_cpumask(mm));
169 cpu_clear(smp_processor_id(), cpu_mask); 169 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
170 170
171 local_flush_tlb(); 171 local_flush_tlb();
172 if (!cpus_empty(cpu_mask)) 172 if (!cpumask_empty(&cpu_mask))
173 flush_tlb_others(cpu_mask, mm, FLUSH_ALL); 173 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
174 174
175 preempt_enable(); 175 preempt_enable();
@@ -186,11 +186,11 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
186 cpumask_t cpu_mask; 186 cpumask_t cpu_mask;
187 187
188 preempt_disable(); 188 preempt_disable();
189 cpu_mask = mm->cpu_vm_mask; 189 cpumask_copy(&cpu_mask, mm_cpumask(mm));
190 cpu_clear(smp_processor_id(), cpu_mask); 190 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
191 191
192 local_flush_tlb_page(mm, va); 192 local_flush_tlb_page(mm, va);
193 if (!cpus_empty(cpu_mask)) 193 if (!cpumask_empty(&cpu_mask))
194 flush_tlb_others(cpu_mask, mm, va); 194 flush_tlb_others(cpu_mask, mm, va);
195 195
196 preempt_enable(); 196 preempt_enable();
diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h
index 2e73623feb6b..e8f8037d872b 100644
--- a/arch/parisc/include/asm/smp.h
+++ b/arch/parisc/include/asm/smp.h
@@ -33,15 +33,6 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
33 33
34#endif /* !ASSEMBLY */ 34#endif /* !ASSEMBLY */
35 35
36/*
37 * This magic constant controls our willingness to transfer
38 * a process across CPUs. Such a transfer incurs cache and tlb
39 * misses. The current value is inherited from i386. Still needs
40 * to be tuned for parisc.
41 */
42
43#define PROC_CHANGE_PENALTY 15 /* Schedule penalty */
44
45#define raw_smp_processor_id() (current_thread_info()->cpu) 36#define raw_smp_processor_id() (current_thread_info()->cpu)
46 37
47#else /* CONFIG_SMP */ 38#else /* CONFIG_SMP */
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 5fa1e273006e..82f364e209fc 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -31,8 +31,6 @@
31#include <asm/mmzone.h> 31#include <asm/mmzone.h>
32#include <asm/sections.h> 32#include <asm/sections.h>
33 33
34DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
35
36extern int data_start; 34extern int data_start;
37 35
38#ifdef CONFIG_DISCONTIGMEM 36#ifdef CONFIG_DISCONTIGMEM
@@ -686,7 +684,7 @@ void show_mem(unsigned int filter)
686 int shared = 0, cached = 0; 684 int shared = 0, cached = 0;
687 685
688 printk(KERN_INFO "Mem-info:\n"); 686 printk(KERN_INFO "Mem-info:\n");
689 show_free_areas(); 687 show_free_areas(filter);
690#ifndef CONFIG_DISCONTIGMEM 688#ifndef CONFIG_DISCONTIGMEM
691 i = max_mapnr; 689 i = max_mapnr;
692 while (i-- > 0) { 690 while (i-- > 0) {
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a3128ca0fe11..423145a6f7ba 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -140,6 +140,7 @@ config PPC
140 select IRQ_PER_CPU 140 select IRQ_PER_CPU
141 select GENERIC_IRQ_SHOW 141 select GENERIC_IRQ_SHOW
142 select GENERIC_IRQ_SHOW_LEVEL 142 select GENERIC_IRQ_SHOW_LEVEL
143 select HAVE_RCU_TABLE_FREE if SMP
143 144
144config EARLY_PRINTK 145config EARLY_PRINTK
145 bool 146 bool
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index a597dd77b903..e72dcf6a421d 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -35,27 +35,6 @@ config DEBUG_STACKOVERFLOW
35 This option will cause messages to be printed if free stack space 35 This option will cause messages to be printed if free stack space
36 drops below a certain limit. 36 drops below a certain limit.
37 37
38config DEBUG_STACK_USAGE
39 bool "Stack utilization instrumentation"
40 depends on DEBUG_KERNEL
41 help
42 Enables the display of the minimum amount of free stack which each
43 task has ever had available in the sysrq-T and sysrq-P debug output.
44
45 This option will slow down process creation somewhat.
46
47config DEBUG_PER_CPU_MAPS
48 bool "Debug access to per_cpu maps"
49 depends on DEBUG_KERNEL
50 depends on SMP
51 default n
52 ---help---
53 Say Y to verify that the per_cpu map being accessed has
54 been setup. Adds a fair amount of code to kernel memory
55 and decreases performance.
56
57 Say N if unsure.
58
59config HCALL_STATS 38config HCALL_STATS
60 bool "Hypervisor call instrumentation" 39 bool "Hypervisor call instrumentation"
61 depends on PPC_PSERIES && DEBUG_FS && TRACEPOINTS 40 depends on PPC_PSERIES && DEBUG_FS && TRACEPOINTS
diff --git a/arch/powerpc/boot/dts/mpc8313erdb.dts b/arch/powerpc/boot/dts/mpc8313erdb.dts
index 761faa7b6964..ac1eb320c7b4 100644
--- a/arch/powerpc/boot/dts/mpc8313erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8313erdb.dts
@@ -176,6 +176,19 @@
176 sleep = <&pmc 0x00300000>; 176 sleep = <&pmc 0x00300000>;
177 }; 177 };
178 178
179 ptp_clock@24E00 {
180 compatible = "fsl,etsec-ptp";
181 reg = <0x24E00 0xB0>;
182 interrupts = <12 0x8 13 0x8>;
183 interrupt-parent = < &ipic >;
184 fsl,tclk-period = <10>;
185 fsl,tmr-prsc = <100>;
186 fsl,tmr-add = <0x999999A4>;
187 fsl,tmr-fiper1 = <0x3B9AC9F6>;
188 fsl,tmr-fiper2 = <0x00018696>;
189 fsl,max-adj = <659999998>;
190 };
191
179 enet0: ethernet@24000 { 192 enet0: ethernet@24000 {
180 #address-cells = <1>; 193 #address-cells = <1>;
181 #size-cells = <1>; 194 #size-cells = <1>;
diff --git a/arch/powerpc/boot/dts/mpc8572ds.dts b/arch/powerpc/boot/dts/mpc8572ds.dts
index cafc1285c140..f6c04d25e916 100644
--- a/arch/powerpc/boot/dts/mpc8572ds.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds.dts
@@ -324,6 +324,19 @@
324 }; 324 };
325 }; 325 };
326 326
327 ptp_clock@24E00 {
328 compatible = "fsl,etsec-ptp";
329 reg = <0x24E00 0xB0>;
330 interrupts = <68 2 69 2 70 2 71 2>;
331 interrupt-parent = < &mpic >;
332 fsl,tclk-period = <5>;
333 fsl,tmr-prsc = <200>;
334 fsl,tmr-add = <0xAAAAAAAB>;
335 fsl,tmr-fiper1 = <0x3B9AC9FB>;
336 fsl,tmr-fiper2 = <0x3B9AC9FB>;
337 fsl,max-adj = <499999999>;
338 };
339
327 enet0: ethernet@24000 { 340 enet0: ethernet@24000 {
328 #address-cells = <1>; 341 #address-cells = <1>;
329 #size-cells = <1>; 342 #size-cells = <1>;
diff --git a/arch/powerpc/boot/dts/p2020ds.dts b/arch/powerpc/boot/dts/p2020ds.dts
index 2bcf3683d223..dae403100f2f 100644
--- a/arch/powerpc/boot/dts/p2020ds.dts
+++ b/arch/powerpc/boot/dts/p2020ds.dts
@@ -178,6 +178,19 @@
178 178
179 }; 179 };
180 180
181 ptp_clock@24E00 {
182 compatible = "fsl,etsec-ptp";
183 reg = <0x24E00 0xB0>;
184 interrupts = <68 2 69 2 70 2>;
185 interrupt-parent = < &mpic >;
186 fsl,tclk-period = <5>;
187 fsl,tmr-prsc = <200>;
188 fsl,tmr-add = <0xCCCCCCCD>;
189 fsl,tmr-fiper1 = <0x3B9AC9FB>;
190 fsl,tmr-fiper2 = <0x0001869B>;
191 fsl,max-adj = <249999999>;
192 };
193
181 enet0: ethernet@24000 { 194 enet0: ethernet@24000 {
182 tbi-handle = <&tbi0>; 195 tbi-handle = <&tbi0>;
183 phy-handle = <&phy0>; 196 phy-handle = <&phy0>;
diff --git a/arch/powerpc/boot/dts/p2020rdb.dts b/arch/powerpc/boot/dts/p2020rdb.dts
index 3782a58f13be..1d7a05f3021e 100644
--- a/arch/powerpc/boot/dts/p2020rdb.dts
+++ b/arch/powerpc/boot/dts/p2020rdb.dts
@@ -224,6 +224,19 @@
224 status = "disabled"; 224 status = "disabled";
225 }; 225 };
226 226
227 ptp_clock@24E00 {
228 compatible = "fsl,etsec-ptp";
229 reg = <0x24E00 0xB0>;
230 interrupts = <68 2 69 2 70 2>;
231 interrupt-parent = < &mpic >;
232 fsl,tclk-period = <5>;
233 fsl,tmr-prsc = <200>;
234 fsl,tmr-add = <0xCCCCCCCD>;
235 fsl,tmr-fiper1 = <0x3B9AC9FB>;
236 fsl,tmr-fiper2 = <0x0001869B>;
237 fsl,max-adj = <249999999>;
238 };
239
227 enet0: ethernet@24000 { 240 enet0: ethernet@24000 {
228 fixed-link = <1 1 1000 0 0>; 241 fixed-link = <1 1 1000 0 0>;
229 phy-connection-type = "rgmii-id"; 242 phy-connection-type = "rgmii-id";
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index abe8532bd14e..bf301ac62f35 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -31,14 +31,29 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
31#endif 31#endif
32 32
33#ifdef CONFIG_SMP 33#ifdef CONFIG_SMP
34extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift); 34struct mmu_gather;
35extern void pte_free_finish(void); 35extern void tlb_remove_table(struct mmu_gather *, void *);
36
37static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
38{
39 unsigned long pgf = (unsigned long)table;
40 BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
41 pgf |= shift;
42 tlb_remove_table(tlb, (void *)pgf);
43}
44
45static inline void __tlb_remove_table(void *_table)
46{
47 void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
48 unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
49
50 pgtable_free(table, shift);
51}
36#else /* CONFIG_SMP */ 52#else /* CONFIG_SMP */
37static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift) 53static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
38{ 54{
39 pgtable_free(table, shift); 55 pgtable_free(table, shift);
40} 56}
41static inline void pte_free_finish(void) { }
42#endif /* !CONFIG_SMP */ 57#endif /* !CONFIG_SMP */
43 58
44static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage, 59static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index d8529ef13b23..37c353e8af7c 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -139,10 +139,12 @@ static inline struct thread_info *current_thread_info(void)
139#define TLF_NAPPING 0 /* idle thread enabled NAP mode */ 139#define TLF_NAPPING 0 /* idle thread enabled NAP mode */
140#define TLF_SLEEPING 1 /* suspend code enabled SLEEP mode */ 140#define TLF_SLEEPING 1 /* suspend code enabled SLEEP mode */
141#define TLF_RESTORE_SIGMASK 2 /* Restore signal mask in do_signal */ 141#define TLF_RESTORE_SIGMASK 2 /* Restore signal mask in do_signal */
142#define TLF_LAZY_MMU 3 /* tlb_batch is active */
142 143
143#define _TLF_NAPPING (1 << TLF_NAPPING) 144#define _TLF_NAPPING (1 << TLF_NAPPING)
144#define _TLF_SLEEPING (1 << TLF_SLEEPING) 145#define _TLF_SLEEPING (1 << TLF_SLEEPING)
145#define _TLF_RESTORE_SIGMASK (1 << TLF_RESTORE_SIGMASK) 146#define _TLF_RESTORE_SIGMASK (1 << TLF_RESTORE_SIGMASK)
147#define _TLF_LAZY_MMU (1 << TLF_LAZY_MMU)
146 148
147#ifndef __ASSEMBLY__ 149#ifndef __ASSEMBLY__
148#define HAVE_SET_RESTORE_SIGMASK 1 150#define HAVE_SET_RESTORE_SIGMASK 1
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 095043d79946..91e52df3d81d 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -395,6 +395,9 @@ struct task_struct *__switch_to(struct task_struct *prev,
395 struct thread_struct *new_thread, *old_thread; 395 struct thread_struct *new_thread, *old_thread;
396 unsigned long flags; 396 unsigned long flags;
397 struct task_struct *last; 397 struct task_struct *last;
398#ifdef CONFIG_PPC_BOOK3S_64
399 struct ppc64_tlb_batch *batch;
400#endif
398 401
399#ifdef CONFIG_SMP 402#ifdef CONFIG_SMP
400 /* avoid complexity of lazy save/restore of fpu 403 /* avoid complexity of lazy save/restore of fpu
@@ -513,7 +516,17 @@ struct task_struct *__switch_to(struct task_struct *prev,
513 old_thread->accum_tb += (current_tb - start_tb); 516 old_thread->accum_tb += (current_tb - start_tb);
514 new_thread->start_tb = current_tb; 517 new_thread->start_tb = current_tb;
515 } 518 }
516#endif 519#endif /* CONFIG_PPC64 */
520
521#ifdef CONFIG_PPC_BOOK3S_64
522 batch = &__get_cpu_var(ppc64_tlb_batch);
523 if (batch->active) {
524 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
525 if (batch->index)
526 __flush_tlb_pending(batch);
527 batch->active = 0;
528 }
529#endif /* CONFIG_PPC_BOOK3S_64 */
517 530
518 local_irq_save(flags); 531 local_irq_save(flags);
519 532
@@ -528,6 +541,14 @@ struct task_struct *__switch_to(struct task_struct *prev,
528 hard_irq_disable(); 541 hard_irq_disable();
529 last = _switch(old_thread, new_thread); 542 last = _switch(old_thread, new_thread);
530 543
544#ifdef CONFIG_PPC_BOOK3S_64
545 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
546 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
547 batch = &__get_cpu_var(ppc64_tlb_batch);
548 batch->active = 1;
549 }
550#endif /* CONFIG_PPC_BOOK3S_64 */
551
531 local_irq_restore(flags); 552 local_irq_restore(flags);
532 553
533 return last; 554 return last;
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 6a3997f98dfb..af40c8768a78 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -33,110 +33,6 @@
33 33
34#include "mmu_decl.h" 34#include "mmu_decl.h"
35 35
36DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
37
38#ifdef CONFIG_SMP
39
40/*
41 * Handle batching of page table freeing on SMP. Page tables are
42 * queued up and send to be freed later by RCU in order to avoid
43 * freeing a page table page that is being walked without locks
44 */
45
46static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
47static unsigned long pte_freelist_forced_free;
48
49struct pte_freelist_batch
50{
51 struct rcu_head rcu;
52 unsigned int index;
53 unsigned long tables[0];
54};
55
56#define PTE_FREELIST_SIZE \
57 ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
58 / sizeof(unsigned long))
59
60static void pte_free_smp_sync(void *arg)
61{
62 /* Do nothing, just ensure we sync with all CPUs */
63}
64
65/* This is only called when we are critically out of memory
66 * (and fail to get a page in pte_free_tlb).
67 */
68static void pgtable_free_now(void *table, unsigned shift)
69{
70 pte_freelist_forced_free++;
71
72 smp_call_function(pte_free_smp_sync, NULL, 1);
73
74 pgtable_free(table, shift);
75}
76
77static void pte_free_rcu_callback(struct rcu_head *head)
78{
79 struct pte_freelist_batch *batch =
80 container_of(head, struct pte_freelist_batch, rcu);
81 unsigned int i;
82
83 for (i = 0; i < batch->index; i++) {
84 void *table = (void *)(batch->tables[i] & ~MAX_PGTABLE_INDEX_SIZE);
85 unsigned shift = batch->tables[i] & MAX_PGTABLE_INDEX_SIZE;
86
87 pgtable_free(table, shift);
88 }
89
90 free_page((unsigned long)batch);
91}
92
93static void pte_free_submit(struct pte_freelist_batch *batch)
94{
95 call_rcu_sched(&batch->rcu, pte_free_rcu_callback);
96}
97
98void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
99{
100 /* This is safe since tlb_gather_mmu has disabled preemption */
101 struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
102 unsigned long pgf;
103
104 if (atomic_read(&tlb->mm->mm_users) < 2 ||
105 cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))){
106 pgtable_free(table, shift);
107 return;
108 }
109
110 if (*batchp == NULL) {
111 *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
112 if (*batchp == NULL) {
113 pgtable_free_now(table, shift);
114 return;
115 }
116 (*batchp)->index = 0;
117 }
118 BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
119 pgf = (unsigned long)table | shift;
120 (*batchp)->tables[(*batchp)->index++] = pgf;
121 if ((*batchp)->index == PTE_FREELIST_SIZE) {
122 pte_free_submit(*batchp);
123 *batchp = NULL;
124 }
125}
126
127void pte_free_finish(void)
128{
129 /* This is safe since tlb_gather_mmu has disabled preemption */
130 struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
131
132 if (*batchp == NULL)
133 return;
134 pte_free_submit(*batchp);
135 *batchp = NULL;
136}
137
138#endif /* CONFIG_SMP */
139
140static inline int is_exec_fault(void) 36static inline int is_exec_fault(void)
141{ 37{
142 return current->thread.regs && TRAP(current->thread.regs) == 0x400; 38 return current->thread.regs && TRAP(current->thread.regs) == 0x400;
diff --git a/arch/powerpc/mm/tlb_hash32.c b/arch/powerpc/mm/tlb_hash32.c
index 690566b66e8e..27b863c14941 100644
--- a/arch/powerpc/mm/tlb_hash32.c
+++ b/arch/powerpc/mm/tlb_hash32.c
@@ -71,9 +71,6 @@ void tlb_flush(struct mmu_gather *tlb)
71 */ 71 */
72 _tlbia(); 72 _tlbia();
73 } 73 }
74
75 /* Push out batch of freed page tables */
76 pte_free_finish();
77} 74}
78 75
79/* 76/*
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index c14d09f614f3..31f18207970b 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -155,7 +155,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
155 155
156void tlb_flush(struct mmu_gather *tlb) 156void tlb_flush(struct mmu_gather *tlb)
157{ 157{
158 struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch); 158 struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);
159 159
160 /* If there's a TLB batch pending, then we must flush it because the 160 /* If there's a TLB batch pending, then we must flush it because the
161 * pages are going to be freed and we really don't want to have a CPU 161 * pages are going to be freed and we really don't want to have a CPU
@@ -164,8 +164,7 @@ void tlb_flush(struct mmu_gather *tlb)
164 if (tlbbatch->index) 164 if (tlbbatch->index)
165 __flush_tlb_pending(tlbbatch); 165 __flush_tlb_pending(tlbbatch);
166 166
167 /* Push out batch of freed page tables */ 167 put_cpu_var(ppc64_tlb_batch);
168 pte_free_finish();
169} 168}
170 169
171/** 170/**
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 2a030d89bbc6..0bdad3aecc67 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -299,9 +299,6 @@ EXPORT_SYMBOL(flush_tlb_range);
299void tlb_flush(struct mmu_gather *tlb) 299void tlb_flush(struct mmu_gather *tlb)
300{ 300{
301 flush_tlb_mm(tlb->mm); 301 flush_tlb_mm(tlb->mm);
302
303 /* Push out batch of freed page tables */
304 pte_free_finish();
305} 302}
306 303
307/* 304/*
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 9074a54c4d10..77eee5477a52 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -29,65 +29,77 @@
29#include <asm/smp.h> 29#include <asm/smp.h>
30#include <asm/tlbflush.h> 30#include <asm/tlbflush.h>
31 31
32#ifndef CONFIG_SMP
33#define TLB_NR_PTRS 1
34#else
35#define TLB_NR_PTRS 508
36#endif
37
38struct mmu_gather { 32struct mmu_gather {
39 struct mm_struct *mm; 33 struct mm_struct *mm;
40 unsigned int fullmm; 34 unsigned int fullmm;
41 unsigned int nr_ptes; 35 unsigned int nr_ptes;
42 unsigned int nr_pxds; 36 unsigned int nr_pxds;
43 void *array[TLB_NR_PTRS]; 37 unsigned int max;
38 void **array;
39 void *local[8];
44}; 40};
45 41
46DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); 42static inline void __tlb_alloc_page(struct mmu_gather *tlb)
47
48static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
49 unsigned int full_mm_flush)
50{ 43{
51 struct mmu_gather *tlb = &get_cpu_var(mmu_gathers); 44 unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
52 45
46 if (addr) {
47 tlb->array = (void *) addr;
48 tlb->max = PAGE_SIZE / sizeof(void *);
49 }
50}
51
52static inline void tlb_gather_mmu(struct mmu_gather *tlb,
53 struct mm_struct *mm,
54 unsigned int full_mm_flush)
55{
53 tlb->mm = mm; 56 tlb->mm = mm;
57 tlb->max = ARRAY_SIZE(tlb->local);
58 tlb->array = tlb->local;
54 tlb->fullmm = full_mm_flush; 59 tlb->fullmm = full_mm_flush;
55 tlb->nr_ptes = 0;
56 tlb->nr_pxds = TLB_NR_PTRS;
57 if (tlb->fullmm) 60 if (tlb->fullmm)
58 __tlb_flush_mm(mm); 61 __tlb_flush_mm(mm);
59 return tlb; 62 else
63 __tlb_alloc_page(tlb);
64 tlb->nr_ptes = 0;
65 tlb->nr_pxds = tlb->max;
60} 66}
61 67
62static inline void tlb_flush_mmu(struct mmu_gather *tlb, 68static inline void tlb_flush_mmu(struct mmu_gather *tlb)
63 unsigned long start, unsigned long end)
64{ 69{
65 if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < TLB_NR_PTRS)) 70 if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < tlb->max))
66 __tlb_flush_mm(tlb->mm); 71 __tlb_flush_mm(tlb->mm);
67 while (tlb->nr_ptes > 0) 72 while (tlb->nr_ptes > 0)
68 page_table_free_rcu(tlb->mm, tlb->array[--tlb->nr_ptes]); 73 page_table_free_rcu(tlb->mm, tlb->array[--tlb->nr_ptes]);
69 while (tlb->nr_pxds < TLB_NR_PTRS) 74 while (tlb->nr_pxds < tlb->max)
70 crst_table_free_rcu(tlb->mm, tlb->array[tlb->nr_pxds++]); 75 crst_table_free_rcu(tlb->mm, tlb->array[tlb->nr_pxds++]);
71} 76}
72 77
73static inline void tlb_finish_mmu(struct mmu_gather *tlb, 78static inline void tlb_finish_mmu(struct mmu_gather *tlb,
74 unsigned long start, unsigned long end) 79 unsigned long start, unsigned long end)
75{ 80{
76 tlb_flush_mmu(tlb, start, end); 81 tlb_flush_mmu(tlb);
77 82
78 rcu_table_freelist_finish(); 83 rcu_table_freelist_finish();
79 84
80 /* keep the page table cache within bounds */ 85 /* keep the page table cache within bounds */
81 check_pgt_cache(); 86 check_pgt_cache();
82 87
83 put_cpu_var(mmu_gathers); 88 if (tlb->array != tlb->local)
89 free_pages((unsigned long) tlb->array, 0);
84} 90}
85 91
86/* 92/*
87 * Release the page cache reference for a pte removed by 93 * Release the page cache reference for a pte removed by
88 * tlb_ptep_clear_flush. In both flush modes the tlb fo a page cache page 94 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
89 * has already been freed, so just do free_page_and_swap_cache. 95 * has already been freed, so just do free_page_and_swap_cache.
90 */ 96 */
97static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
98{
99 free_page_and_swap_cache(page);
100 return 1; /* avoid calling tlb_flush_mmu */
101}
102
91static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) 103static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
92{ 104{
93 free_page_and_swap_cache(page); 105 free_page_and_swap_cache(page);
@@ -103,7 +115,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
103 if (!tlb->fullmm) { 115 if (!tlb->fullmm) {
104 tlb->array[tlb->nr_ptes++] = pte; 116 tlb->array[tlb->nr_ptes++] = pte;
105 if (tlb->nr_ptes >= tlb->nr_pxds) 117 if (tlb->nr_ptes >= tlb->nr_pxds)
106 tlb_flush_mmu(tlb, 0, 0); 118 tlb_flush_mmu(tlb);
107 } else 119 } else
108 page_table_free(tlb->mm, (unsigned long *) pte); 120 page_table_free(tlb->mm, (unsigned long *) pte);
109} 121}
@@ -124,7 +136,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
124 if (!tlb->fullmm) { 136 if (!tlb->fullmm) {
125 tlb->array[--tlb->nr_pxds] = pmd; 137 tlb->array[--tlb->nr_pxds] = pmd;
126 if (tlb->nr_ptes >= tlb->nr_pxds) 138 if (tlb->nr_ptes >= tlb->nr_pxds)
127 tlb_flush_mmu(tlb, 0, 0); 139 tlb_flush_mmu(tlb);
128 } else 140 } else
129 crst_table_free(tlb->mm, (unsigned long *) pmd); 141 crst_table_free(tlb->mm, (unsigned long *) pmd);
130#endif 142#endif
@@ -146,7 +158,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
146 if (!tlb->fullmm) { 158 if (!tlb->fullmm) {
147 tlb->array[--tlb->nr_pxds] = pud; 159 tlb->array[--tlb->nr_pxds] = pud;
148 if (tlb->nr_ptes >= tlb->nr_pxds) 160 if (tlb->nr_ptes >= tlb->nr_pxds)
149 tlb_flush_mmu(tlb, 0, 0); 161 tlb_flush_mmu(tlb);
150 } else 162 } else
151 crst_table_free(tlb->mm, (unsigned long *) pud); 163 crst_table_free(tlb->mm, (unsigned long *) pud);
152#endif 164#endif
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 8d4330642512..14c6fae6fe6b 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -36,7 +36,6 @@ struct rcu_table_freelist {
36 ((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \ 36 ((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
37 / sizeof(unsigned long)) 37 / sizeof(unsigned long))
38 38
39DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
40static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist); 39static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);
41 40
42static void __page_table_free(struct mm_struct *mm, unsigned long *table); 41static void __page_table_free(struct mm_struct *mm, unsigned long *table);
diff --git a/arch/score/Kconfig.debug b/arch/score/Kconfig.debug
index 451ed54ce646..a1f346df0a71 100644
--- a/arch/score/Kconfig.debug
+++ b/arch/score/Kconfig.debug
@@ -16,15 +16,6 @@ config CMDLINE
16 other cases you can specify kernel args so that you don't have 16 other cases you can specify kernel args so that you don't have
17 to set them up in board prom initialization routines. 17 to set them up in board prom initialization routines.
18 18
19config DEBUG_STACK_USAGE
20 bool "Enable stack utilization instrumentation"
21 depends on DEBUG_KERNEL
22 help
23 Enables the display of the minimum amount of free stack which each
24 task has ever had available in the sysrq-T and sysrq-P debug output.
25
26 This option will slow down process creation somewhat.
27
28config RUNTIME_DEBUG 19config RUNTIME_DEBUG
29 bool "Enable run-time debugging" 20 bool "Enable run-time debugging"
30 depends on DEBUG_KERNEL 21 depends on DEBUG_KERNEL
diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c
index 50fdec54c70a..cee6bce1e30c 100644
--- a/arch/score/mm/init.c
+++ b/arch/score/mm/init.c
@@ -38,8 +38,6 @@
38#include <asm/sections.h> 38#include <asm/sections.h>
39#include <asm/tlb.h> 39#include <asm/tlb.h>
40 40
41DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
42
43unsigned long empty_zero_page; 41unsigned long empty_zero_page;
44EXPORT_SYMBOL_GPL(empty_zero_page); 42EXPORT_SYMBOL_GPL(empty_zero_page);
45 43
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index 1553d56cf4e0..c1d5a820b1aa 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -28,15 +28,6 @@ config STACK_DEBUG
28 every function call and will therefore incur a major 28 every function call and will therefore incur a major
29 performance hit. Most users should say N. 29 performance hit. Most users should say N.
30 30
31config DEBUG_STACK_USAGE
32 bool "Stack utilization instrumentation"
33 depends on DEBUG_KERNEL
34 help
35 Enables the display of the minimum amount of free stack which each
36 task has ever had available in the sysrq-T and sysrq-P debug output.
37
38 This option will slow down process creation somewhat.
39
40config 4KSTACKS 31config 4KSTACKS
41 bool "Use 4Kb for kernel stacks instead of 8Kb" 32 bool "Use 4Kb for kernel stacks instead of 8Kb"
42 depends on DEBUG_KERNEL && (MMU || BROKEN) && !PAGE_SIZE_64KB 33 depends on DEBUG_KERNEL && (MMU || BROKEN) && !PAGE_SIZE_64KB
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 75abb38dffd5..6c308d8b9a50 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -23,8 +23,6 @@ struct mmu_gather {
23 unsigned long start, end; 23 unsigned long start, end;
24}; 24};
25 25
26DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
27
28static inline void init_tlb_gather(struct mmu_gather *tlb) 26static inline void init_tlb_gather(struct mmu_gather *tlb)
29{ 27{
30 tlb->start = TASK_SIZE; 28 tlb->start = TASK_SIZE;
@@ -36,17 +34,13 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
36 } 34 }
37} 35}
38 36
39static inline struct mmu_gather * 37static inline void
40tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) 38tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
41{ 39{
42 struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
43
44 tlb->mm = mm; 40 tlb->mm = mm;
45 tlb->fullmm = full_mm_flush; 41 tlb->fullmm = full_mm_flush;
46 42
47 init_tlb_gather(tlb); 43 init_tlb_gather(tlb);
48
49 return tlb;
50} 44}
51 45
52static inline void 46static inline void
@@ -57,8 +51,6 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
57 51
58 /* keep the page table cache within bounds */ 52 /* keep the page table cache within bounds */
59 check_pgt_cache(); 53 check_pgt_cache();
60
61 put_cpu_var(mmu_gathers);
62} 54}
63 55
64static inline void 56static inline void
@@ -91,7 +83,21 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
91 } 83 }
92} 84}
93 85
94#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) 86static inline void tlb_flush_mmu(struct mmu_gather *tlb)
87{
88}
89
90static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
91{
92 free_page_and_swap_cache(page);
93 return 1; /* avoid calling tlb_flush_mmu */
94}
95
96static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
97{
98 __tlb_remove_page(tlb, page);
99}
100
95#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep) 101#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
96#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) 102#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
97#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp) 103#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 0d3f912e3334..58a93fb3d965 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -28,7 +28,6 @@
28#include <asm/cache.h> 28#include <asm/cache.h>
29#include <asm/sizes.h> 29#include <asm/sizes.h>
30 30
31DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
32pgd_t swapper_pg_dir[PTRS_PER_PGD]; 31pgd_t swapper_pg_dir[PTRS_PER_PGD];
33 32
34void __init generic_mem_init(void) 33void __init generic_mem_init(void)
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug
index d9a795efbc04..6db35fba79fd 100644
--- a/arch/sparc/Kconfig.debug
+++ b/arch/sparc/Kconfig.debug
@@ -6,15 +6,6 @@ config TRACE_IRQFLAGS_SUPPORT
6 6
7source "lib/Kconfig.debug" 7source "lib/Kconfig.debug"
8 8
9config DEBUG_STACK_USAGE
10 bool "Enable stack utilization instrumentation"
11 depends on DEBUG_KERNEL
12 help
13 Enables the display of the minimum amount of free stack which each
14 task has ever had available in the sysrq-T and sysrq-P debug output.
15
16 This option will slow down process creation somewhat.
17
18config DEBUG_DCFLUSH 9config DEBUG_DCFLUSH
19 bool "D-cache flush debugging" 10 bool "D-cache flush debugging"
20 depends on SPARC64 && DEBUG_KERNEL 11 depends on SPARC64 && DEBUG_KERNEL
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
index 5bdfa2c6e400..4e5e0878144f 100644
--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
@@ -78,4 +78,7 @@ static inline void check_pgt_cache(void)
78 quicklist_trim(0, NULL, 25, 16); 78 quicklist_trim(0, NULL, 25, 16);
79} 79}
80 80
81#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
82#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
83
81#endif /* _SPARC64_PGALLOC_H */ 84#endif /* _SPARC64_PGALLOC_H */
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index b77128c80524..1e03c5a6b4f7 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -655,9 +655,11 @@ static inline int pte_special(pte_t pte)
655#define pte_unmap(pte) do { } while (0) 655#define pte_unmap(pte) do { } while (0)
656 656
657/* Actual page table PTE updates. */ 657/* Actual page table PTE updates. */
658extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig); 658extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
659 pte_t *ptep, pte_t orig, int fullmm);
659 660
660static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) 661static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
662 pte_t *ptep, pte_t pte, int fullmm)
661{ 663{
662 pte_t orig = *ptep; 664 pte_t orig = *ptep;
663 665
@@ -670,12 +672,19 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p
670 * and SUN4V pte layout, so this inline test is fine. 672 * and SUN4V pte layout, so this inline test is fine.
671 */ 673 */
672 if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID)) 674 if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
673 tlb_batch_add(mm, addr, ptep, orig); 675 tlb_batch_add(mm, addr, ptep, orig, fullmm);
674} 676}
675 677
678#define set_pte_at(mm,addr,ptep,pte) \
679 __set_pte_at((mm), (addr), (ptep), (pte), 0)
680
676#define pte_clear(mm,addr,ptep) \ 681#define pte_clear(mm,addr,ptep) \
677 set_pte_at((mm), (addr), (ptep), __pte(0UL)) 682 set_pte_at((mm), (addr), (ptep), __pte(0UL))
678 683
684#define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
685#define pte_clear_not_present_full(mm,addr,ptep,fullmm) \
686 __set_pte_at((mm), (addr), (ptep), __pte(0UL), (fullmm))
687
679#ifdef DCACHE_ALIASING_POSSIBLE 688#ifdef DCACHE_ALIASING_POSSIBLE
680#define __HAVE_ARCH_MOVE_PTE 689#define __HAVE_ARCH_MOVE_PTE
681#define move_pte(pte, prot, old_addr, new_addr) \ 690#define move_pte(pte, prot, old_addr, new_addr) \
diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h
index dca406b9b6fc..190e18913cc6 100644
--- a/arch/sparc/include/asm/tlb_64.h
+++ b/arch/sparc/include/asm/tlb_64.h
@@ -7,66 +7,11 @@
7#include <asm/tlbflush.h> 7#include <asm/tlbflush.h>
8#include <asm/mmu_context.h> 8#include <asm/mmu_context.h>
9 9
10#define TLB_BATCH_NR 192
11
12/*
13 * For UP we don't need to worry about TLB flush
14 * and page free order so much..
15 */
16#ifdef CONFIG_SMP
17 #define FREE_PTE_NR 506
18 #define tlb_fast_mode(bp) ((bp)->pages_nr == ~0U)
19#else
20 #define FREE_PTE_NR 1
21 #define tlb_fast_mode(bp) 1
22#endif
23
24struct mmu_gather {
25 struct mm_struct *mm;
26 unsigned int pages_nr;
27 unsigned int need_flush;
28 unsigned int fullmm;
29 unsigned int tlb_nr;
30 unsigned long vaddrs[TLB_BATCH_NR];
31 struct page *pages[FREE_PTE_NR];
32};
33
34DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
35
36#ifdef CONFIG_SMP 10#ifdef CONFIG_SMP
37extern void smp_flush_tlb_pending(struct mm_struct *, 11extern void smp_flush_tlb_pending(struct mm_struct *,
38 unsigned long, unsigned long *); 12 unsigned long, unsigned long *);
39#endif 13#endif
40 14
41extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
42extern void flush_tlb_pending(void);
43
44static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
45{
46 struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
47
48 BUG_ON(mp->tlb_nr);
49
50 mp->mm = mm;
51 mp->pages_nr = num_online_cpus() > 1 ? 0U : ~0U;
52 mp->fullmm = full_mm_flush;
53
54 return mp;
55}
56
57
58static inline void tlb_flush_mmu(struct mmu_gather *mp)
59{
60 if (!mp->fullmm)
61 flush_tlb_pending();
62 if (mp->need_flush) {
63 free_pages_and_swap_cache(mp->pages, mp->pages_nr);
64 mp->pages_nr = 0;
65 mp->need_flush = 0;
66 }
67
68}
69
70#ifdef CONFIG_SMP 15#ifdef CONFIG_SMP
71extern void smp_flush_tlb_mm(struct mm_struct *mm); 16extern void smp_flush_tlb_mm(struct mm_struct *mm);
72#define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm) 17#define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
@@ -74,38 +19,14 @@ extern void smp_flush_tlb_mm(struct mm_struct *mm);
74#define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT) 19#define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT)
75#endif 20#endif
76 21
77static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, unsigned long end) 22extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
78{ 23extern void flush_tlb_pending(void);
79 tlb_flush_mmu(mp);
80
81 if (mp->fullmm)
82 mp->fullmm = 0;
83
84 /* keep the page table cache within bounds */
85 check_pgt_cache();
86
87 put_cpu_var(mmu_gathers);
88}
89
90static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
91{
92 if (tlb_fast_mode(mp)) {
93 free_page_and_swap_cache(page);
94 return;
95 }
96 mp->need_flush = 1;
97 mp->pages[mp->pages_nr++] = page;
98 if (mp->pages_nr >= FREE_PTE_NR)
99 tlb_flush_mmu(mp);
100}
101
102#define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0)
103#define pte_free_tlb(mp, ptepage, addr) pte_free((mp)->mm, ptepage)
104#define pmd_free_tlb(mp, pmdp, addr) pmd_free((mp)->mm, pmdp)
105#define pud_free_tlb(tlb,pudp, addr) __pud_free_tlb(tlb,pudp,addr)
106 24
107#define tlb_migrate_finish(mm) do { } while (0)
108#define tlb_start_vma(tlb, vma) do { } while (0) 25#define tlb_start_vma(tlb, vma) do { } while (0)
109#define tlb_end_vma(tlb, vma) do { } while (0) 26#define tlb_end_vma(tlb, vma) do { } while (0)
27#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
28#define tlb_flush(tlb) flush_tlb_pending()
29
30#include <asm-generic/tlb.h>
110 31
111#endif /* _SPARC64_TLB_H */ 32#endif /* _SPARC64_TLB_H */
diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
index fbb675dbe0c9..2ef463494153 100644
--- a/arch/sparc/include/asm/tlbflush_64.h
+++ b/arch/sparc/include/asm/tlbflush_64.h
@@ -5,9 +5,17 @@
5#include <asm/mmu_context.h> 5#include <asm/mmu_context.h>
6 6
7/* TSB flush operations. */ 7/* TSB flush operations. */
8struct mmu_gather; 8
9#define TLB_BATCH_NR 192
10
11struct tlb_batch {
12 struct mm_struct *mm;
13 unsigned long tlb_nr;
14 unsigned long vaddrs[TLB_BATCH_NR];
15};
16
9extern void flush_tsb_kernel_range(unsigned long start, unsigned long end); 17extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
10extern void flush_tsb_user(struct mmu_gather *mp); 18extern void flush_tsb_user(struct tlb_batch *tb);
11 19
12/* TLB flush operations. */ 20/* TLB flush operations. */
13 21
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 3609bdee9ed2..3249d3f3234d 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -82,7 +82,7 @@ static void prom_sync_me(void)
82 "nop\n\t" : : "r" (&trapbase)); 82 "nop\n\t" : : "r" (&trapbase));
83 83
84 prom_printf("PROM SYNC COMMAND...\n"); 84 prom_printf("PROM SYNC COMMAND...\n");
85 show_free_areas(); 85 show_free_areas(0);
86 if(current->pid != 0) { 86 if(current->pid != 0) {
87 local_irq_enable(); 87 local_irq_enable();
88 sys_sync(); 88 sys_sync();
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 4c31e2b6e71b..ca217327e8d2 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -37,8 +37,6 @@
37#include <asm/prom.h> 37#include <asm/prom.h>
38#include <asm/leon.h> 38#include <asm/leon.h>
39 39
40DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
41
42unsigned long *sparc_valid_addr_bitmap; 40unsigned long *sparc_valid_addr_bitmap;
43EXPORT_SYMBOL(sparc_valid_addr_bitmap); 41EXPORT_SYMBOL(sparc_valid_addr_bitmap);
44 42
@@ -78,7 +76,7 @@ void __init kmap_init(void)
78void show_mem(unsigned int filter) 76void show_mem(unsigned int filter)
79{ 77{
80 printk("Mem-info:\n"); 78 printk("Mem-info:\n");
81 show_free_areas(); 79 show_free_areas(filter);
82 printk("Free swap: %6ldkB\n", 80 printk("Free swap: %6ldkB\n",
83 nr_swap_pages << (PAGE_SHIFT-10)); 81 nr_swap_pages << (PAGE_SHIFT-10));
84 printk("%ld pages of RAM\n", totalram_pages); 82 printk("%ld pages of RAM\n", totalram_pages);
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index d8f21e24a82f..b1f279cd00bf 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -19,33 +19,34 @@
19 19
20/* Heavily inspired by the ppc64 code. */ 20/* Heavily inspired by the ppc64 code. */
21 21
22DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 22static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
23 23
24void flush_tlb_pending(void) 24void flush_tlb_pending(void)
25{ 25{
26 struct mmu_gather *mp = &get_cpu_var(mmu_gathers); 26 struct tlb_batch *tb = &get_cpu_var(tlb_batch);
27 27
28 if (mp->tlb_nr) { 28 if (tb->tlb_nr) {
29 flush_tsb_user(mp); 29 flush_tsb_user(tb);
30 30
31 if (CTX_VALID(mp->mm->context)) { 31 if (CTX_VALID(tb->mm->context)) {
32#ifdef CONFIG_SMP 32#ifdef CONFIG_SMP
33 smp_flush_tlb_pending(mp->mm, mp->tlb_nr, 33 smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
34 &mp->vaddrs[0]); 34 &tb->vaddrs[0]);
35#else 35#else
36 __flush_tlb_pending(CTX_HWBITS(mp->mm->context), 36 __flush_tlb_pending(CTX_HWBITS(tb->mm->context),
37 mp->tlb_nr, &mp->vaddrs[0]); 37 tb->tlb_nr, &tb->vaddrs[0]);
38#endif 38#endif
39 } 39 }
40 mp->tlb_nr = 0; 40 tb->tlb_nr = 0;
41 } 41 }
42 42
43 put_cpu_var(mmu_gathers); 43 put_cpu_var(tlb_batch);
44} 44}
45 45
46void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig) 46void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
47 pte_t *ptep, pte_t orig, int fullmm)
47{ 48{
48 struct mmu_gather *mp = &__get_cpu_var(mmu_gathers); 49 struct tlb_batch *tb = &get_cpu_var(tlb_batch);
49 unsigned long nr; 50 unsigned long nr;
50 51
51 vaddr &= PAGE_MASK; 52 vaddr &= PAGE_MASK;
@@ -77,21 +78,25 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t
77 78
78no_cache_flush: 79no_cache_flush:
79 80
80 if (mp->fullmm) 81 if (fullmm) {
82 put_cpu_var(tlb_batch);
81 return; 83 return;
84 }
82 85
83 nr = mp->tlb_nr; 86 nr = tb->tlb_nr;
84 87
85 if (unlikely(nr != 0 && mm != mp->mm)) { 88 if (unlikely(nr != 0 && mm != tb->mm)) {
86 flush_tlb_pending(); 89 flush_tlb_pending();
87 nr = 0; 90 nr = 0;
88 } 91 }
89 92
90 if (nr == 0) 93 if (nr == 0)
91 mp->mm = mm; 94 tb->mm = mm;
92 95
93 mp->vaddrs[nr] = vaddr; 96 tb->vaddrs[nr] = vaddr;
94 mp->tlb_nr = ++nr; 97 tb->tlb_nr = ++nr;
95 if (nr >= TLB_BATCH_NR) 98 if (nr >= TLB_BATCH_NR)
96 flush_tlb_pending(); 99 flush_tlb_pending();
100
101 put_cpu_var(tlb_batch);
97} 102}
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 101d7c82870b..948461513499 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -47,12 +47,13 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
47 } 47 }
48} 48}
49 49
50static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries) 50static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
51 unsigned long tsb, unsigned long nentries)
51{ 52{
52 unsigned long i; 53 unsigned long i;
53 54
54 for (i = 0; i < mp->tlb_nr; i++) { 55 for (i = 0; i < tb->tlb_nr; i++) {
55 unsigned long v = mp->vaddrs[i]; 56 unsigned long v = tb->vaddrs[i];
56 unsigned long tag, ent, hash; 57 unsigned long tag, ent, hash;
57 58
58 v &= ~0x1UL; 59 v &= ~0x1UL;
@@ -65,9 +66,9 @@ static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, uns
65 } 66 }
66} 67}
67 68
68void flush_tsb_user(struct mmu_gather *mp) 69void flush_tsb_user(struct tlb_batch *tb)
69{ 70{
70 struct mm_struct *mm = mp->mm; 71 struct mm_struct *mm = tb->mm;
71 unsigned long nentries, base, flags; 72 unsigned long nentries, base, flags;
72 73
73 spin_lock_irqsave(&mm->context.lock, flags); 74 spin_lock_irqsave(&mm->context.lock, flags);
@@ -76,7 +77,7 @@ void flush_tsb_user(struct mmu_gather *mp)
76 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; 77 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
77 if (tlb_type == cheetah_plus || tlb_type == hypervisor) 78 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
78 base = __pa(base); 79 base = __pa(base);
79 __flush_tsb_one(mp, PAGE_SHIFT, base, nentries); 80 __flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
80 81
81#ifdef CONFIG_HUGETLB_PAGE 82#ifdef CONFIG_HUGETLB_PAGE
82 if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { 83 if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
@@ -84,7 +85,7 @@ void flush_tsb_user(struct mmu_gather *mp)
84 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; 85 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
85 if (tlb_type == cheetah_plus || tlb_type == hypervisor) 86 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
86 base = __pa(base); 87 base = __pa(base);
87 __flush_tsb_one(mp, HPAGE_SHIFT, base, nentries); 88 __flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
88 } 89 }
89#endif 90#endif
90 spin_unlock_irqrestore(&mm->context.lock, flags); 91 spin_unlock_irqrestore(&mm->context.lock, flags);
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index e32b0c23c4c8..635e1bfb1c5d 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -339,6 +339,14 @@ config NO_IOPORT
339 339
340source "drivers/pci/Kconfig" 340source "drivers/pci/Kconfig"
341 341
342config HOTPLUG
343 bool "Support for hot-pluggable devices"
344 ---help---
345 Say Y here if you want to plug devices into your computer while
346 the system is running, and be able to use them quickly. In many
347 cases, the devices can likewise be unplugged at any time too.
348 One well-known example of this is USB.
349
342source "drivers/pci/hotplug/Kconfig" 350source "drivers/pci/hotplug/Kconfig"
343 351
344endmenu 352endmenu
diff --git a/arch/tile/Kconfig.debug b/arch/tile/Kconfig.debug
index 9bc161a02c71..ddbfc3322d7f 100644
--- a/arch/tile/Kconfig.debug
+++ b/arch/tile/Kconfig.debug
@@ -21,15 +21,6 @@ config DEBUG_STACKOVERFLOW
21 This option will cause messages to be printed if free stack space 21 This option will cause messages to be printed if free stack space
22 drops below a certain limit. 22 drops below a certain limit.
23 23
24config DEBUG_STACK_USAGE
25 bool "Stack utilization instrumentation"
26 depends on DEBUG_KERNEL
27 help
28 Enables the display of the minimum amount of free stack which each
29 task has ever had available in the sysrq-T and sysrq-P debug output.
30
31 This option will slow down process creation somewhat.
32
33config DEBUG_EXTRA_FLAGS 24config DEBUG_EXTRA_FLAGS
34 string "Additional compiler arguments when building with '-g'" 25 string "Additional compiler arguments when building with '-g'"
35 depends on DEBUG_INFO 26 depends on DEBUG_INFO
diff --git a/arch/tile/configs/tile_defconfig b/arch/tile/configs/tile_defconfig
deleted file mode 100644
index 0fe54445fda5..000000000000
--- a/arch/tile/configs/tile_defconfig
+++ /dev/null
@@ -1,71 +0,0 @@
1CONFIG_EXPERIMENTAL=y
2# CONFIG_SWAP is not set
3CONFIG_SYSVIPC=y
4CONFIG_BLK_DEV_INITRD=y
5CONFIG_INITRAMFS_SOURCE="usr/contents.txt"
6CONFIG_EXPERT=y
7# CONFIG_COMPAT_BRK is not set
8CONFIG_PROFILING=y
9CONFIG_MODULES=y
10CONFIG_MODULE_UNLOAD=y
11# CONFIG_BLK_DEV_BSG is not set
12# CONFIG_IOSCHED_DEADLINE is not set
13# CONFIG_IOSCHED_CFQ is not set
14CONFIG_NO_HZ=y
15CONFIG_HIGH_RES_TIMERS=y
16CONFIG_HZ_100=y
17CONFIG_NET=y
18CONFIG_PACKET=y
19CONFIG_UNIX=y
20CONFIG_INET=y
21CONFIG_IP_MULTICAST=y
22# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
23# CONFIG_INET_XFRM_MODE_TUNNEL is not set
24# CONFIG_INET_LRO is not set
25# CONFIG_INET_DIAG is not set
26CONFIG_IPV6=y
27# CONFIG_WIRELESS is not set
28CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
29CONFIG_SCSI=y
30CONFIG_BLK_DEV_SD=y
31CONFIG_SCSI_CONSTANTS=y
32CONFIG_SCSI_LOGGING=y
33CONFIG_NETDEVICES=y
34CONFIG_TUN=y
35# CONFIG_NETDEV_10000 is not set
36# CONFIG_WLAN is not set
37# CONFIG_INPUT_MOUSEDEV is not set
38# CONFIG_INPUT_KEYBOARD is not set
39# CONFIG_INPUT_MOUSE is not set
40# CONFIG_SERIO is not set
41# CONFIG_VT is not set
42# CONFIG_LEGACY_PTYS is not set
43# CONFIG_HW_RANDOM is not set
44CONFIG_WATCHDOG=y
45CONFIG_WATCHDOG_NOWAYOUT=y
46# CONFIG_HID_SUPPORT is not set
47CONFIG_RTC_CLASS=y
48# CONFIG_RTC_INTF_SYSFS is not set
49# CONFIG_RTC_INTF_PROC is not set
50CONFIG_EXT2_FS=y
51CONFIG_EXT3_FS=y
52# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
53CONFIG_FUSE_FS=y
54CONFIG_MSDOS_FS=y
55CONFIG_VFAT_FS=m
56CONFIG_TMPFS=y
57CONFIG_HUGETLBFS=y
58CONFIG_NFS_FS=m
59CONFIG_NFS_V3=y
60CONFIG_NLS_CODEPAGE_437=y
61CONFIG_NLS_ISO8859_1=y
62CONFIG_FRAME_WARN=2048
63CONFIG_MAGIC_SYSRQ=y
64CONFIG_DEBUG_KERNEL=y
65CONFIG_DETECT_HUNG_TASK=y
66CONFIG_DEBUG_SPINLOCK_SLEEP=y
67CONFIG_DEBUG_INFO=y
68CONFIG_DEBUG_VM=y
69# CONFIG_RCU_CPU_STALL_DETECTOR is not set
70CONFIG_DEBUG_STACKOVERFLOW=y
71CONFIG_DEBUG_EXTRA_FLAGS="-femit-struct-debug-baseonly"
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
new file mode 100644
index 000000000000..09f1c7fad8bf
--- /dev/null
+++ b/arch/tile/configs/tilegx_defconfig
@@ -0,0 +1,1833 @@
1#
2# Automatically generated make config: don't edit
3# Linux/tilegx 2.6.39-rc5 Kernel Configuration
4# Wed May 4 11:08:04 2011
5#
6CONFIG_TILE=y
7CONFIG_MMU=y
8CONFIG_GENERIC_CSUM=y
9CONFIG_SEMAPHORE_SLEEPERS=y
10CONFIG_HAVE_ARCH_ALLOC_REMAP=y
11CONFIG_HAVE_SETUP_PER_CPU_AREA=y
12CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
13CONFIG_SYS_SUPPORTS_HUGETLBFS=y
14CONFIG_GENERIC_TIME=y
15CONFIG_GENERIC_CLOCKEVENTS=y
16CONFIG_RWSEM_GENERIC_SPINLOCK=y
17CONFIG_DEFAULT_MIGRATION_COST=10000000
18CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
19CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
20CONFIG_ARCH_DMA_ADDR_T_64BIT=y
21CONFIG_LOCKDEP_SUPPORT=y
22CONFIG_STACKTRACE_SUPPORT=y
23CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
24CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
25CONFIG_TRACE_IRQFLAGS_SUPPORT=y
26CONFIG_STRICT_DEVMEM=y
27CONFIG_SMP=y
28# CONFIG_DEBUG_COPY_FROM_USER is not set
29CONFIG_HVC_TILE=y
30CONFIG_TILEGX=y
31CONFIG_64BIT=y
32CONFIG_ARCH_DEFCONFIG="arch/tile/configs/tilegx_defconfig"
33CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
34CONFIG_CONSTRUCTORS=y
35
36#
37# General setup
38#
39CONFIG_EXPERIMENTAL=y
40CONFIG_INIT_ENV_ARG_LIMIT=32
41CONFIG_CROSS_COMPILE=""
42CONFIG_LOCALVERSION=""
43# CONFIG_LOCALVERSION_AUTO is not set
44CONFIG_SWAP=y
45CONFIG_SYSVIPC=y
46CONFIG_SYSVIPC_SYSCTL=y
47CONFIG_POSIX_MQUEUE=y
48CONFIG_POSIX_MQUEUE_SYSCTL=y
49CONFIG_BSD_PROCESS_ACCT=y
50CONFIG_BSD_PROCESS_ACCT_V3=y
51# CONFIG_FHANDLE is not set
52CONFIG_TASKSTATS=y
53CONFIG_TASK_DELAY_ACCT=y
54CONFIG_TASK_XACCT=y
55CONFIG_TASK_IO_ACCOUNTING=y
56CONFIG_AUDIT=y
57CONFIG_HAVE_GENERIC_HARDIRQS=y
58
59#
60# IRQ subsystem
61#
62CONFIG_GENERIC_HARDIRQS=y
63CONFIG_GENERIC_IRQ_PROBE=y
64CONFIG_GENERIC_IRQ_SHOW=y
65CONFIG_GENERIC_PENDING_IRQ=y
66
67#
68# RCU Subsystem
69#
70CONFIG_TREE_RCU=y
71# CONFIG_PREEMPT_RCU is not set
72# CONFIG_RCU_TRACE is not set
73CONFIG_RCU_FANOUT=64
74# CONFIG_RCU_FANOUT_EXACT is not set
75# CONFIG_RCU_FAST_NO_HZ is not set
76# CONFIG_TREE_RCU_TRACE is not set
77# CONFIG_IKCONFIG is not set
78CONFIG_LOG_BUF_SHIFT=19
79CONFIG_CGROUPS=y
80CONFIG_CGROUP_DEBUG=y
81CONFIG_CGROUP_NS=y
82# CONFIG_CGROUP_FREEZER is not set
83CONFIG_CGROUP_DEVICE=y
84CONFIG_CPUSETS=y
85CONFIG_PROC_PID_CPUSET=y
86CONFIG_CGROUP_CPUACCT=y
87CONFIG_RESOURCE_COUNTERS=y
88CONFIG_CGROUP_MEM_RES_CTLR=y
89CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y
90CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED=y
91CONFIG_CGROUP_SCHED=y
92CONFIG_FAIR_GROUP_SCHED=y
93CONFIG_RT_GROUP_SCHED=y
94CONFIG_BLK_CGROUP=y
95# CONFIG_DEBUG_BLK_CGROUP is not set
96CONFIG_NAMESPACES=y
97CONFIG_UTS_NS=y
98CONFIG_IPC_NS=y
99CONFIG_USER_NS=y
100CONFIG_PID_NS=y
101CONFIG_NET_NS=y
102# CONFIG_SCHED_AUTOGROUP is not set
103CONFIG_MM_OWNER=y
104# CONFIG_SYSFS_DEPRECATED is not set
105CONFIG_RELAY=y
106CONFIG_BLK_DEV_INITRD=y
107CONFIG_INITRAMFS_SOURCE="usr/contents.txt"
108CONFIG_INITRAMFS_ROOT_UID=0
109CONFIG_INITRAMFS_ROOT_GID=0
110CONFIG_RD_GZIP=y
111# CONFIG_RD_BZIP2 is not set
112# CONFIG_RD_LZMA is not set
113# CONFIG_RD_XZ is not set
114# CONFIG_RD_LZO is not set
115CONFIG_INITRAMFS_COMPRESSION_NONE=y
116# CONFIG_INITRAMFS_COMPRESSION_GZIP is not set
117CONFIG_CC_OPTIMIZE_FOR_SIZE=y
118CONFIG_SYSCTL=y
119CONFIG_ANON_INODES=y
120CONFIG_EXPERT=y
121CONFIG_SYSCTL_SYSCALL=y
122CONFIG_KALLSYMS=y
123# CONFIG_KALLSYMS_ALL is not set
124# CONFIG_KALLSYMS_EXTRA_PASS is not set
125CONFIG_HOTPLUG=y
126CONFIG_PRINTK=y
127CONFIG_BUG=y
128CONFIG_ELF_CORE=y
129CONFIG_BASE_FULL=y
130CONFIG_FUTEX=y
131CONFIG_EPOLL=y
132CONFIG_SIGNALFD=y
133CONFIG_TIMERFD=y
134CONFIG_EVENTFD=y
135CONFIG_SHMEM=y
136CONFIG_AIO=y
137CONFIG_EMBEDDED=y
138
139#
140# Kernel Performance Events And Counters
141#
142CONFIG_VM_EVENT_COUNTERS=y
143CONFIG_PCI_QUIRKS=y
144CONFIG_SLUB_DEBUG=y
145# CONFIG_COMPAT_BRK is not set
146# CONFIG_SLAB is not set
147CONFIG_SLUB=y
148# CONFIG_SLOB is not set
149CONFIG_PROFILING=y
150CONFIG_USE_GENERIC_SMP_HELPERS=y
151
152#
153# GCOV-based kernel profiling
154#
155# CONFIG_GCOV_KERNEL is not set
156# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
157CONFIG_SLABINFO=y
158CONFIG_RT_MUTEXES=y
159CONFIG_BASE_SMALL=0
160CONFIG_MODULES=y
161CONFIG_MODULE_FORCE_LOAD=y
162CONFIG_MODULE_UNLOAD=y
163# CONFIG_MODULE_FORCE_UNLOAD is not set
164# CONFIG_MODVERSIONS is not set
165# CONFIG_MODULE_SRCVERSION_ALL is not set
166CONFIG_STOP_MACHINE=y
167CONFIG_BLOCK=y
168CONFIG_BLK_DEV_BSG=y
169CONFIG_BLK_DEV_INTEGRITY=y
170# CONFIG_BLK_DEV_THROTTLING is not set
171CONFIG_BLOCK_COMPAT=y
172
173#
174# IO Schedulers
175#
176CONFIG_IOSCHED_NOOP=y
177CONFIG_IOSCHED_DEADLINE=y
178CONFIG_IOSCHED_CFQ=y
179CONFIG_CFQ_GROUP_IOSCHED=y
180# CONFIG_DEFAULT_DEADLINE is not set
181CONFIG_DEFAULT_CFQ=y
182# CONFIG_DEFAULT_NOOP is not set
183CONFIG_DEFAULT_IOSCHED="cfq"
184CONFIG_PADATA=y
185# CONFIG_INLINE_SPIN_TRYLOCK is not set
186# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
187# CONFIG_INLINE_SPIN_LOCK is not set
188# CONFIG_INLINE_SPIN_LOCK_BH is not set
189# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
190# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
191CONFIG_INLINE_SPIN_UNLOCK=y
192# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
193CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
194# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
195# CONFIG_INLINE_READ_TRYLOCK is not set
196# CONFIG_INLINE_READ_LOCK is not set
197# CONFIG_INLINE_READ_LOCK_BH is not set
198# CONFIG_INLINE_READ_LOCK_IRQ is not set
199# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
200CONFIG_INLINE_READ_UNLOCK=y
201# CONFIG_INLINE_READ_UNLOCK_BH is not set
202CONFIG_INLINE_READ_UNLOCK_IRQ=y
203# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
204# CONFIG_INLINE_WRITE_TRYLOCK is not set
205# CONFIG_INLINE_WRITE_LOCK is not set
206# CONFIG_INLINE_WRITE_LOCK_BH is not set
207# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
208# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
209CONFIG_INLINE_WRITE_UNLOCK=y
210# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
211CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
212# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
213CONFIG_MUTEX_SPIN_ON_OWNER=y
214
215#
216# Tilera-specific configuration
217#
218CONFIG_NR_CPUS=100
219CONFIG_TICK_ONESHOT=y
220CONFIG_NO_HZ=y
221CONFIG_HIGH_RES_TIMERS=y
222CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
223CONFIG_HZ_100=y
224# CONFIG_HZ_250 is not set
225# CONFIG_HZ_300 is not set
226# CONFIG_HZ_1000 is not set
227CONFIG_HZ=100
228CONFIG_SCHED_HRTICK=y
229# CONFIG_KEXEC is not set
230CONFIG_COMPAT=y
231CONFIG_SYSVIPC_COMPAT=y
232# CONFIG_HIGHMEM is not set
233CONFIG_NUMA=y
234CONFIG_NODES_SHIFT=2
235CONFIG_PAGE_OFFSET=0xC0000000
236CONFIG_SELECT_MEMORY_MODEL=y
237CONFIG_DISCONTIGMEM_MANUAL=y
238CONFIG_DISCONTIGMEM=y
239CONFIG_FLAT_NODE_MEM_MAP=y
240CONFIG_NEED_MULTIPLE_NODES=y
241CONFIG_PAGEFLAGS_EXTENDED=y
242CONFIG_SPLIT_PTLOCK_CPUS=4
243# CONFIG_COMPACTION is not set
244CONFIG_MIGRATION=y
245CONFIG_PHYS_ADDR_T_64BIT=y
246CONFIG_ZONE_DMA_FLAG=0
247CONFIG_VIRT_TO_BUS=y
248# CONFIG_KSM is not set
249CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
250# CONFIG_CMDLINE_BOOL is not set
251CONFIG_VMALLOC_RESERVE=0x1000000
252CONFIG_HARDWALL=y
253CONFIG_KERNEL_PL=1
254
255#
256# Bus options
257#
258CONFIG_PCI=y
259CONFIG_PCI_DOMAINS=y
260# CONFIG_NO_IOMEM is not set
261# CONFIG_NO_IOPORT is not set
262# CONFIG_ARCH_SUPPORTS_MSI is not set
263CONFIG_PCI_DEBUG=y
264# CONFIG_PCI_STUB is not set
265# CONFIG_PCI_IOV is not set
266# CONFIG_HOTPLUG_PCI is not set
267
268#
269# Executable file formats
270#
271CONFIG_KCORE_ELF=y
272CONFIG_BINFMT_ELF=y
273CONFIG_COMPAT_BINFMT_ELF=y
274# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
275# CONFIG_HAVE_AOUT is not set
276CONFIG_BINFMT_MISC=y
277CONFIG_NET=y
278
279#
280# Networking options
281#
282CONFIG_PACKET=y
283CONFIG_UNIX=y
284CONFIG_XFRM=y
285CONFIG_XFRM_USER=y
286CONFIG_XFRM_SUB_POLICY=y
287CONFIG_XFRM_MIGRATE=y
288CONFIG_XFRM_STATISTICS=y
289CONFIG_XFRM_IPCOMP=m
290CONFIG_NET_KEY=m
291CONFIG_NET_KEY_MIGRATE=y
292CONFIG_INET=y
293CONFIG_IP_MULTICAST=y
294CONFIG_IP_ADVANCED_ROUTER=y
295# CONFIG_IP_FIB_TRIE_STATS is not set
296CONFIG_IP_MULTIPLE_TABLES=y
297CONFIG_IP_ROUTE_MULTIPATH=y
298CONFIG_IP_ROUTE_VERBOSE=y
299CONFIG_IP_ROUTE_CLASSID=y
300# CONFIG_IP_PNP is not set
301CONFIG_NET_IPIP=m
302# CONFIG_NET_IPGRE_DEMUX is not set
303CONFIG_IP_MROUTE=y
304# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set
305CONFIG_IP_PIMSM_V1=y
306CONFIG_IP_PIMSM_V2=y
307# CONFIG_ARPD is not set
308CONFIG_SYN_COOKIES=y
309CONFIG_INET_AH=m
310CONFIG_INET_ESP=m
311CONFIG_INET_IPCOMP=m
312CONFIG_INET_XFRM_TUNNEL=m
313CONFIG_INET_TUNNEL=m
314CONFIG_INET_XFRM_MODE_TRANSPORT=m
315CONFIG_INET_XFRM_MODE_TUNNEL=m
316CONFIG_INET_XFRM_MODE_BEET=m
317CONFIG_INET_LRO=y
318CONFIG_INET_DIAG=m
319CONFIG_INET_TCP_DIAG=m
320CONFIG_TCP_CONG_ADVANCED=y
321CONFIG_TCP_CONG_BIC=m
322CONFIG_TCP_CONG_CUBIC=y
323CONFIG_TCP_CONG_WESTWOOD=m
324CONFIG_TCP_CONG_HTCP=m
325CONFIG_TCP_CONG_HSTCP=m
326CONFIG_TCP_CONG_HYBLA=m
327CONFIG_TCP_CONG_VEGAS=m
328CONFIG_TCP_CONG_SCALABLE=m
329CONFIG_TCP_CONG_LP=m
330CONFIG_TCP_CONG_VENO=m
331CONFIG_TCP_CONG_YEAH=m
332CONFIG_TCP_CONG_ILLINOIS=m
333CONFIG_DEFAULT_CUBIC=y
334# CONFIG_DEFAULT_RENO is not set
335CONFIG_DEFAULT_TCP_CONG="cubic"
336CONFIG_TCP_MD5SIG=y
337CONFIG_IPV6=y
338CONFIG_IPV6_PRIVACY=y
339CONFIG_IPV6_ROUTER_PREF=y
340CONFIG_IPV6_ROUTE_INFO=y
341CONFIG_IPV6_OPTIMISTIC_DAD=y
342CONFIG_INET6_AH=m
343CONFIG_INET6_ESP=m
344CONFIG_INET6_IPCOMP=m
345CONFIG_IPV6_MIP6=m
346CONFIG_INET6_XFRM_TUNNEL=m
347CONFIG_INET6_TUNNEL=m
348CONFIG_INET6_XFRM_MODE_TRANSPORT=m
349CONFIG_INET6_XFRM_MODE_TUNNEL=m
350CONFIG_INET6_XFRM_MODE_BEET=m
351CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
352CONFIG_IPV6_SIT=m
353# CONFIG_IPV6_SIT_6RD is not set
354CONFIG_IPV6_NDISC_NODETYPE=y
355CONFIG_IPV6_TUNNEL=m
356CONFIG_IPV6_MULTIPLE_TABLES=y
357# CONFIG_IPV6_SUBTREES is not set
358CONFIG_IPV6_MROUTE=y
359# CONFIG_IPV6_MROUTE_MULTIPLE_TABLES is not set
360CONFIG_IPV6_PIMSM_V2=y
361CONFIG_NETLABEL=y
362CONFIG_NETWORK_SECMARK=y
363# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
364CONFIG_NETFILTER=y
365# CONFIG_NETFILTER_DEBUG is not set
366CONFIG_NETFILTER_ADVANCED=y
367CONFIG_BRIDGE_NETFILTER=y
368
369#
370# Core Netfilter Configuration
371#
372CONFIG_NETFILTER_NETLINK=m
373CONFIG_NETFILTER_NETLINK_QUEUE=m
374CONFIG_NETFILTER_NETLINK_LOG=m
375CONFIG_NF_CONNTRACK=y
376CONFIG_NF_CONNTRACK_MARK=y
377CONFIG_NF_CONNTRACK_SECMARK=y
378CONFIG_NF_CONNTRACK_ZONES=y
379CONFIG_NF_CONNTRACK_EVENTS=y
380# CONFIG_NF_CONNTRACK_TIMESTAMP is not set
381CONFIG_NF_CT_PROTO_DCCP=m
382CONFIG_NF_CT_PROTO_GRE=m
383CONFIG_NF_CT_PROTO_SCTP=m
384CONFIG_NF_CT_PROTO_UDPLITE=m
385CONFIG_NF_CONNTRACK_AMANDA=m
386CONFIG_NF_CONNTRACK_FTP=m
387CONFIG_NF_CONNTRACK_H323=m
388CONFIG_NF_CONNTRACK_IRC=m
389CONFIG_NF_CONNTRACK_BROADCAST=m
390CONFIG_NF_CONNTRACK_NETBIOS_NS=m
391# CONFIG_NF_CONNTRACK_SNMP is not set
392CONFIG_NF_CONNTRACK_PPTP=m
393CONFIG_NF_CONNTRACK_SANE=m
394CONFIG_NF_CONNTRACK_SIP=m
395CONFIG_NF_CONNTRACK_TFTP=m
396# CONFIG_NF_CT_NETLINK is not set
397CONFIG_NETFILTER_TPROXY=m
398CONFIG_NETFILTER_XTABLES=y
399
400#
401# Xtables combined modules
402#
403CONFIG_NETFILTER_XT_MARK=m
404CONFIG_NETFILTER_XT_CONNMARK=m
405
406#
407# Xtables targets
408#
409# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set
410# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set
411CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
412CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
413CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
414CONFIG_NETFILTER_XT_TARGET_CT=m
415CONFIG_NETFILTER_XT_TARGET_DSCP=m
416CONFIG_NETFILTER_XT_TARGET_HL=m
417CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
418CONFIG_NETFILTER_XT_TARGET_MARK=m
419CONFIG_NETFILTER_XT_TARGET_NFLOG=m
420CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
421CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
422CONFIG_NETFILTER_XT_TARGET_RATEEST=m
423CONFIG_NETFILTER_XT_TARGET_TEE=m
424CONFIG_NETFILTER_XT_TARGET_TPROXY=m
425CONFIG_NETFILTER_XT_TARGET_TRACE=m
426CONFIG_NETFILTER_XT_TARGET_SECMARK=m
427CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
428CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
429
430#
431# Xtables matches
432#
433# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set
434CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
435CONFIG_NETFILTER_XT_MATCH_COMMENT=m
436CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
437CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
438CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
439CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
440# CONFIG_NETFILTER_XT_MATCH_CPU is not set
441CONFIG_NETFILTER_XT_MATCH_DCCP=m
442# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set
443CONFIG_NETFILTER_XT_MATCH_DSCP=m
444CONFIG_NETFILTER_XT_MATCH_ESP=m
445CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
446CONFIG_NETFILTER_XT_MATCH_HELPER=m
447CONFIG_NETFILTER_XT_MATCH_HL=m
448CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
449CONFIG_NETFILTER_XT_MATCH_IPVS=m
450CONFIG_NETFILTER_XT_MATCH_LENGTH=m
451CONFIG_NETFILTER_XT_MATCH_LIMIT=m
452CONFIG_NETFILTER_XT_MATCH_MAC=m
453CONFIG_NETFILTER_XT_MATCH_MARK=m
454CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
455CONFIG_NETFILTER_XT_MATCH_OSF=m
456CONFIG_NETFILTER_XT_MATCH_OWNER=m
457CONFIG_NETFILTER_XT_MATCH_POLICY=m
458CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
459CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
460CONFIG_NETFILTER_XT_MATCH_QUOTA=m
461CONFIG_NETFILTER_XT_MATCH_RATEEST=m
462CONFIG_NETFILTER_XT_MATCH_REALM=m
463CONFIG_NETFILTER_XT_MATCH_RECENT=m
464CONFIG_NETFILTER_XT_MATCH_SCTP=m
465CONFIG_NETFILTER_XT_MATCH_SOCKET=m
466CONFIG_NETFILTER_XT_MATCH_STATE=y
467CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
468CONFIG_NETFILTER_XT_MATCH_STRING=m
469CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
470CONFIG_NETFILTER_XT_MATCH_TIME=m
471CONFIG_NETFILTER_XT_MATCH_U32=m
472# CONFIG_IP_SET is not set
473CONFIG_IP_VS=m
474CONFIG_IP_VS_IPV6=y
475# CONFIG_IP_VS_DEBUG is not set
476CONFIG_IP_VS_TAB_BITS=12
477
478#
479# IPVS transport protocol load balancing support
480#
481CONFIG_IP_VS_PROTO_TCP=y
482CONFIG_IP_VS_PROTO_UDP=y
483CONFIG_IP_VS_PROTO_AH_ESP=y
484CONFIG_IP_VS_PROTO_ESP=y
485CONFIG_IP_VS_PROTO_AH=y
486CONFIG_IP_VS_PROTO_SCTP=y
487
488#
489# IPVS scheduler
490#
491CONFIG_IP_VS_RR=m
492CONFIG_IP_VS_WRR=m
493CONFIG_IP_VS_LC=m
494CONFIG_IP_VS_WLC=m
495CONFIG_IP_VS_LBLC=m
496CONFIG_IP_VS_LBLCR=m
497# CONFIG_IP_VS_DH is not set
498# CONFIG_IP_VS_SH is not set
499CONFIG_IP_VS_SED=m
500CONFIG_IP_VS_NQ=m
501
502#
503# IPVS application helper
504#
505# CONFIG_IP_VS_NFCT is not set
506# CONFIG_IP_VS_PE_SIP is not set
507
508#
509# IP: Netfilter Configuration
510#
511CONFIG_NF_DEFRAG_IPV4=y
512CONFIG_NF_CONNTRACK_IPV4=y
513# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
514CONFIG_IP_NF_QUEUE=m
515CONFIG_IP_NF_IPTABLES=y
516CONFIG_IP_NF_MATCH_AH=m
517CONFIG_IP_NF_MATCH_ECN=m
518CONFIG_IP_NF_MATCH_TTL=m
519CONFIG_IP_NF_FILTER=y
520CONFIG_IP_NF_TARGET_REJECT=y
521CONFIG_IP_NF_TARGET_LOG=m
522CONFIG_IP_NF_TARGET_ULOG=m
523# CONFIG_NF_NAT is not set
524CONFIG_IP_NF_MANGLE=m
525# CONFIG_IP_NF_TARGET_CLUSTERIP is not set
526CONFIG_IP_NF_TARGET_ECN=m
527CONFIG_IP_NF_TARGET_TTL=m
528CONFIG_IP_NF_RAW=m
529CONFIG_IP_NF_SECURITY=m
530CONFIG_IP_NF_ARPTABLES=m
531CONFIG_IP_NF_ARPFILTER=m
532CONFIG_IP_NF_ARP_MANGLE=m
533
534#
535# IPv6: Netfilter Configuration
536#
537CONFIG_NF_DEFRAG_IPV6=m
538CONFIG_NF_CONNTRACK_IPV6=m
539CONFIG_IP6_NF_QUEUE=m
540CONFIG_IP6_NF_IPTABLES=m
541CONFIG_IP6_NF_MATCH_AH=m
542CONFIG_IP6_NF_MATCH_EUI64=m
543CONFIG_IP6_NF_MATCH_FRAG=m
544CONFIG_IP6_NF_MATCH_OPTS=m
545CONFIG_IP6_NF_MATCH_HL=m
546CONFIG_IP6_NF_MATCH_IPV6HEADER=m
547CONFIG_IP6_NF_MATCH_MH=m
548CONFIG_IP6_NF_MATCH_RT=m
549CONFIG_IP6_NF_TARGET_HL=m
550CONFIG_IP6_NF_TARGET_LOG=m
551CONFIG_IP6_NF_FILTER=m
552CONFIG_IP6_NF_TARGET_REJECT=m
553CONFIG_IP6_NF_MANGLE=m
554CONFIG_IP6_NF_RAW=m
555CONFIG_IP6_NF_SECURITY=m
556CONFIG_BRIDGE_NF_EBTABLES=m
557CONFIG_BRIDGE_EBT_BROUTE=m
558CONFIG_BRIDGE_EBT_T_FILTER=m
559CONFIG_BRIDGE_EBT_T_NAT=m
560CONFIG_BRIDGE_EBT_802_3=m
561CONFIG_BRIDGE_EBT_AMONG=m
562CONFIG_BRIDGE_EBT_ARP=m
563CONFIG_BRIDGE_EBT_IP=m
564CONFIG_BRIDGE_EBT_IP6=m
565CONFIG_BRIDGE_EBT_LIMIT=m
566CONFIG_BRIDGE_EBT_MARK=m
567CONFIG_BRIDGE_EBT_PKTTYPE=m
568CONFIG_BRIDGE_EBT_STP=m
569CONFIG_BRIDGE_EBT_VLAN=m
570CONFIG_BRIDGE_EBT_ARPREPLY=m
571CONFIG_BRIDGE_EBT_DNAT=m
572CONFIG_BRIDGE_EBT_MARK_T=m
573CONFIG_BRIDGE_EBT_REDIRECT=m
574CONFIG_BRIDGE_EBT_SNAT=m
575CONFIG_BRIDGE_EBT_LOG=m
576CONFIG_BRIDGE_EBT_ULOG=m
577CONFIG_BRIDGE_EBT_NFLOG=m
578# CONFIG_IP_DCCP is not set
579CONFIG_IP_SCTP=m
580# CONFIG_SCTP_DBG_MSG is not set
581# CONFIG_SCTP_DBG_OBJCNT is not set
582# CONFIG_SCTP_HMAC_NONE is not set
583# CONFIG_SCTP_HMAC_SHA1 is not set
584CONFIG_SCTP_HMAC_MD5=y
585CONFIG_RDS=m
586CONFIG_RDS_TCP=m
587# CONFIG_RDS_DEBUG is not set
588# CONFIG_TIPC is not set
589# CONFIG_ATM is not set
590# CONFIG_L2TP is not set
591CONFIG_STP=m
592CONFIG_GARP=m
593CONFIG_BRIDGE=m
594CONFIG_BRIDGE_IGMP_SNOOPING=y
595CONFIG_NET_DSA=y
596CONFIG_NET_DSA_TAG_DSA=y
597CONFIG_NET_DSA_TAG_EDSA=y
598CONFIG_NET_DSA_TAG_TRAILER=y
599CONFIG_NET_DSA_MV88E6XXX=y
600CONFIG_NET_DSA_MV88E6060=y
601CONFIG_NET_DSA_MV88E6XXX_NEED_PPU=y
602CONFIG_NET_DSA_MV88E6131=y
603CONFIG_NET_DSA_MV88E6123_61_65=y
604CONFIG_VLAN_8021Q=m
605CONFIG_VLAN_8021Q_GVRP=y
606# CONFIG_DECNET is not set
607CONFIG_LLC=m
608# CONFIG_LLC2 is not set
609# CONFIG_IPX is not set
610# CONFIG_ATALK is not set
611# CONFIG_X25 is not set
612# CONFIG_LAPB is not set
613# CONFIG_ECONET is not set
614# CONFIG_WAN_ROUTER is not set
615CONFIG_PHONET=m
616# CONFIG_IEEE802154 is not set
617CONFIG_NET_SCHED=y
618
619#
620# Queueing/Scheduling
621#
622CONFIG_NET_SCH_CBQ=m
623CONFIG_NET_SCH_HTB=m
624CONFIG_NET_SCH_HFSC=m
625CONFIG_NET_SCH_PRIO=m
626CONFIG_NET_SCH_MULTIQ=m
627CONFIG_NET_SCH_RED=m
628# CONFIG_NET_SCH_SFB is not set
629CONFIG_NET_SCH_SFQ=m
630CONFIG_NET_SCH_TEQL=m
631CONFIG_NET_SCH_TBF=m
632CONFIG_NET_SCH_GRED=m
633CONFIG_NET_SCH_DSMARK=m
634CONFIG_NET_SCH_NETEM=m
635CONFIG_NET_SCH_DRR=m
636# CONFIG_NET_SCH_MQPRIO is not set
637# CONFIG_NET_SCH_CHOKE is not set
638CONFIG_NET_SCH_INGRESS=m
639
640#
641# Classification
642#
643CONFIG_NET_CLS=y
644CONFIG_NET_CLS_BASIC=m
645CONFIG_NET_CLS_TCINDEX=m
646CONFIG_NET_CLS_ROUTE4=m
647CONFIG_NET_CLS_FW=m
648CONFIG_NET_CLS_U32=m
649CONFIG_CLS_U32_PERF=y
650CONFIG_CLS_U32_MARK=y
651CONFIG_NET_CLS_RSVP=m
652CONFIG_NET_CLS_RSVP6=m
653CONFIG_NET_CLS_FLOW=m
654CONFIG_NET_CLS_CGROUP=y
655CONFIG_NET_EMATCH=y
656CONFIG_NET_EMATCH_STACK=32
657CONFIG_NET_EMATCH_CMP=m
658CONFIG_NET_EMATCH_NBYTE=m
659CONFIG_NET_EMATCH_U32=m
660CONFIG_NET_EMATCH_META=m
661CONFIG_NET_EMATCH_TEXT=m
662CONFIG_NET_CLS_ACT=y
663CONFIG_NET_ACT_POLICE=m
664CONFIG_NET_ACT_GACT=m
665CONFIG_GACT_PROB=y
666CONFIG_NET_ACT_MIRRED=m
667CONFIG_NET_ACT_IPT=m
668CONFIG_NET_ACT_NAT=m
669CONFIG_NET_ACT_PEDIT=m
670CONFIG_NET_ACT_SIMP=m
671CONFIG_NET_ACT_SKBEDIT=m
672# CONFIG_NET_ACT_CSUM is not set
673CONFIG_NET_CLS_IND=y
674CONFIG_NET_SCH_FIFO=y
675CONFIG_DCB=y
676CONFIG_DNS_RESOLVER=y
677# CONFIG_BATMAN_ADV is not set
678CONFIG_RPS=y
679CONFIG_RFS_ACCEL=y
680CONFIG_XPS=y
681
682#
683# Network testing
684#
685# CONFIG_NET_PKTGEN is not set
686# CONFIG_HAMRADIO is not set
687# CONFIG_CAN is not set
688# CONFIG_IRDA is not set
689# CONFIG_BT is not set
690# CONFIG_AF_RXRPC is not set
691CONFIG_FIB_RULES=y
692# CONFIG_WIRELESS is not set
693# CONFIG_WIMAX is not set
694# CONFIG_RFKILL is not set
695# CONFIG_NET_9P is not set
696# CONFIG_CAIF is not set
697# CONFIG_CEPH_LIB is not set
698
699#
700# Device Drivers
701#
702
703#
704# Generic Driver Options
705#
706CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
707CONFIG_DEVTMPFS=y
708CONFIG_DEVTMPFS_MOUNT=y
709CONFIG_STANDALONE=y
710CONFIG_PREVENT_FIRMWARE_BUILD=y
711CONFIG_FW_LOADER=y
712# CONFIG_FIRMWARE_IN_KERNEL is not set
713CONFIG_EXTRA_FIRMWARE=""
714# CONFIG_DEBUG_DRIVER is not set
715# CONFIG_DEBUG_DEVRES is not set
716# CONFIG_SYS_HYPERVISOR is not set
717CONFIG_CONNECTOR=y
718CONFIG_PROC_EVENTS=y
719# CONFIG_MTD is not set
720# CONFIG_PARPORT is not set
721CONFIG_BLK_DEV=y
722# CONFIG_BLK_CPQ_DA is not set
723# CONFIG_BLK_CPQ_CISS_DA is not set
724# CONFIG_BLK_DEV_DAC960 is not set
725# CONFIG_BLK_DEV_UMEM is not set
726# CONFIG_BLK_DEV_COW_COMMON is not set
727CONFIG_BLK_DEV_LOOP=y
728CONFIG_BLK_DEV_CRYPTOLOOP=m
729# CONFIG_BLK_DEV_DRBD is not set
730# CONFIG_BLK_DEV_NBD is not set
731CONFIG_BLK_DEV_SX8=m
732CONFIG_BLK_DEV_RAM=y
733CONFIG_BLK_DEV_RAM_COUNT=16
734CONFIG_BLK_DEV_RAM_SIZE=16384
735# CONFIG_BLK_DEV_XIP is not set
736# CONFIG_CDROM_PKTCDVD is not set
737CONFIG_ATA_OVER_ETH=y
738# CONFIG_BLK_DEV_RBD is not set
739# CONFIG_SENSORS_LIS3LV02D is not set
740CONFIG_MISC_DEVICES=y
741# CONFIG_AD525X_DPOT is not set
742# CONFIG_PHANTOM is not set
743# CONFIG_SGI_IOC4 is not set
744# CONFIG_TIFM_CORE is not set
745# CONFIG_ICS932S401 is not set
746# CONFIG_ENCLOSURE_SERVICES is not set
747# CONFIG_HP_ILO is not set
748# CONFIG_APDS9802ALS is not set
749# CONFIG_ISL29003 is not set
750# CONFIG_ISL29020 is not set
751# CONFIG_SENSORS_TSL2550 is not set
752# CONFIG_SENSORS_BH1780 is not set
753# CONFIG_SENSORS_BH1770 is not set
754# CONFIG_SENSORS_APDS990X is not set
755# CONFIG_HMC6352 is not set
756# CONFIG_DS1682 is not set
757# CONFIG_BMP085 is not set
758# CONFIG_PCH_PHUB is not set
759# CONFIG_C2PORT is not set
760
761#
762# EEPROM support
763#
764# CONFIG_EEPROM_AT24 is not set
765# CONFIG_EEPROM_LEGACY is not set
766# CONFIG_EEPROM_MAX6875 is not set
767# CONFIG_EEPROM_93CX6 is not set
768# CONFIG_CB710_CORE is not set
769
770#
771# Texas Instruments shared transport line discipline
772#
773# CONFIG_SENSORS_LIS3_I2C is not set
774
775#
776# SCSI device support
777#
778CONFIG_SCSI_MOD=m
779CONFIG_RAID_ATTRS=m
780CONFIG_SCSI=m
781CONFIG_SCSI_DMA=y
782CONFIG_SCSI_TGT=m
783# CONFIG_SCSI_NETLINK is not set
784CONFIG_SCSI_PROC_FS=y
785
786#
787# SCSI support type (disk, tape, CD-ROM)
788#
789CONFIG_BLK_DEV_SD=m
790# CONFIG_CHR_DEV_ST is not set
791# CONFIG_CHR_DEV_OSST is not set
792# CONFIG_BLK_DEV_SR is not set
793# CONFIG_CHR_DEV_SG is not set
794# CONFIG_CHR_DEV_SCH is not set
795# CONFIG_SCSI_MULTI_LUN is not set
796CONFIG_SCSI_CONSTANTS=y
797CONFIG_SCSI_LOGGING=y
798# CONFIG_SCSI_SCAN_ASYNC is not set
799CONFIG_SCSI_WAIT_SCAN=m
800
801#
802# SCSI Transports
803#
804# CONFIG_SCSI_SPI_ATTRS is not set
805# CONFIG_SCSI_FC_ATTRS is not set
806# CONFIG_SCSI_ISCSI_ATTRS is not set
807CONFIG_SCSI_SAS_ATTRS=m
808# CONFIG_SCSI_SAS_LIBSAS is not set
809# CONFIG_SCSI_SRP_ATTRS is not set
810CONFIG_SCSI_LOWLEVEL=y
811# CONFIG_ISCSI_TCP is not set
812# CONFIG_ISCSI_BOOT_SYSFS is not set
813# CONFIG_SCSI_CXGB3_ISCSI is not set
814# CONFIG_SCSI_CXGB4_ISCSI is not set
815# CONFIG_SCSI_BNX2_ISCSI is not set
816# CONFIG_SCSI_BNX2X_FCOE is not set
817# CONFIG_BE2ISCSI is not set
818# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
819# CONFIG_SCSI_HPSA is not set
820# CONFIG_SCSI_3W_9XXX is not set
821# CONFIG_SCSI_3W_SAS is not set
822# CONFIG_SCSI_ACARD is not set
823# CONFIG_SCSI_AACRAID is not set
824# CONFIG_SCSI_AIC7XXX is not set
825# CONFIG_SCSI_AIC7XXX_OLD is not set
826# CONFIG_SCSI_AIC79XX is not set
827# CONFIG_SCSI_AIC94XX is not set
828# CONFIG_SCSI_MVSAS is not set
829# CONFIG_SCSI_DPT_I2O is not set
830# CONFIG_SCSI_ADVANSYS is not set
831# CONFIG_SCSI_ARCMSR is not set
832# CONFIG_MEGARAID_NEWGEN is not set
833# CONFIG_MEGARAID_LEGACY is not set
834# CONFIG_MEGARAID_SAS is not set
835# CONFIG_SCSI_MPT2SAS is not set
836# CONFIG_SCSI_HPTIOP is not set
837# CONFIG_LIBFC is not set
838# CONFIG_LIBFCOE is not set
839# CONFIG_FCOE is not set
840# CONFIG_SCSI_DMX3191D is not set
841# CONFIG_SCSI_FUTURE_DOMAIN is not set
842# CONFIG_SCSI_IPS is not set
843# CONFIG_SCSI_INITIO is not set
844# CONFIG_SCSI_INIA100 is not set
845# CONFIG_SCSI_STEX is not set
846# CONFIG_SCSI_SYM53C8XX_2 is not set
847# CONFIG_SCSI_IPR is not set
848# CONFIG_SCSI_QLOGIC_1280 is not set
849# CONFIG_SCSI_QLA_FC is not set
850# CONFIG_SCSI_QLA_ISCSI is not set
851# CONFIG_SCSI_LPFC is not set
852# CONFIG_SCSI_DC395x is not set
853# CONFIG_SCSI_DC390T is not set
854# CONFIG_SCSI_DEBUG is not set
855# CONFIG_SCSI_PMCRAID is not set
856# CONFIG_SCSI_PM8001 is not set
857# CONFIG_SCSI_SRP is not set
858# CONFIG_SCSI_BFA_FC is not set
859# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
860# CONFIG_SCSI_DH is not set
861# CONFIG_SCSI_OSD_INITIATOR is not set
862CONFIG_ATA=m
863# CONFIG_ATA_NONSTANDARD is not set
864CONFIG_ATA_VERBOSE_ERROR=y
865CONFIG_SATA_PMP=y
866
867#
868# Controllers with non-SFF native interface
869#
870# CONFIG_SATA_AHCI is not set
871# CONFIG_SATA_AHCI_PLATFORM is not set
872# CONFIG_SATA_INIC162X is not set
873# CONFIG_SATA_ACARD_AHCI is not set
874CONFIG_SATA_SIL24=m
875CONFIG_ATA_SFF=y
876
877#
878# SFF controllers with custom DMA interface
879#
880# CONFIG_PDC_ADMA is not set
881# CONFIG_SATA_QSTOR is not set
882# CONFIG_SATA_SX4 is not set
883CONFIG_ATA_BMDMA=y
884
885#
886# SATA SFF controllers with BMDMA
887#
888# CONFIG_ATA_PIIX is not set
889# CONFIG_SATA_MV is not set
890# CONFIG_SATA_NV is not set
891# CONFIG_SATA_PROMISE is not set
892# CONFIG_SATA_SIL is not set
893# CONFIG_SATA_SIS is not set
894# CONFIG_SATA_SVW is not set
895# CONFIG_SATA_ULI is not set
896# CONFIG_SATA_VIA is not set
897# CONFIG_SATA_VITESSE is not set
898
899#
900# PATA SFF controllers with BMDMA
901#
902# CONFIG_PATA_ALI is not set
903# CONFIG_PATA_AMD is not set
904# CONFIG_PATA_ARASAN_CF is not set
905# CONFIG_PATA_ARTOP is not set
906# CONFIG_PATA_ATIIXP is not set
907# CONFIG_PATA_ATP867X is not set
908# CONFIG_PATA_CMD64X is not set
909# CONFIG_PATA_CS5520 is not set
910# CONFIG_PATA_CS5530 is not set
911# CONFIG_PATA_CS5536 is not set
912# CONFIG_PATA_CYPRESS is not set
913# CONFIG_PATA_EFAR is not set
914# CONFIG_PATA_HPT366 is not set
915# CONFIG_PATA_HPT37X is not set
916# CONFIG_PATA_HPT3X2N is not set
917# CONFIG_PATA_HPT3X3 is not set
918# CONFIG_PATA_IT8213 is not set
919# CONFIG_PATA_IT821X is not set
920# CONFIG_PATA_JMICRON is not set
921# CONFIG_PATA_MARVELL is not set
922# CONFIG_PATA_NETCELL is not set
923# CONFIG_PATA_NINJA32 is not set
924# CONFIG_PATA_NS87415 is not set
925# CONFIG_PATA_OLDPIIX is not set
926# CONFIG_PATA_OPTIDMA is not set
927# CONFIG_PATA_PDC2027X is not set
928# CONFIG_PATA_PDC_OLD is not set
929# CONFIG_PATA_RADISYS is not set
930# CONFIG_PATA_RDC is not set
931# CONFIG_PATA_SC1200 is not set
932# CONFIG_PATA_SCH is not set
933# CONFIG_PATA_SERVERWORKS is not set
934# CONFIG_PATA_SIL680 is not set
935# CONFIG_PATA_SIS is not set
936# CONFIG_PATA_TOSHIBA is not set
937# CONFIG_PATA_TRIFLEX is not set
938# CONFIG_PATA_VIA is not set
939# CONFIG_PATA_WINBOND is not set
940
941#
942# PIO-only SFF controllers
943#
944# CONFIG_PATA_CMD640_PCI is not set
945# CONFIG_PATA_MPIIX is not set
946# CONFIG_PATA_NS87410 is not set
947# CONFIG_PATA_OPTI is not set
948# CONFIG_PATA_PLATFORM is not set
949# CONFIG_PATA_RZ1000 is not set
950
951#
952# Generic fallback / legacy drivers
953#
954# CONFIG_ATA_GENERIC is not set
955# CONFIG_PATA_LEGACY is not set
956CONFIG_MD=y
957CONFIG_BLK_DEV_MD=y
958CONFIG_MD_AUTODETECT=y
959CONFIG_MD_LINEAR=m
960CONFIG_MD_RAID0=m
961CONFIG_MD_RAID1=m
962CONFIG_MD_RAID10=m
963CONFIG_MD_RAID456=m
964CONFIG_MULTICORE_RAID456=y
965# CONFIG_MD_MULTIPATH is not set
966CONFIG_MD_FAULTY=m
967CONFIG_BLK_DEV_DM=m
968CONFIG_DM_DEBUG=y
969CONFIG_DM_CRYPT=m
970CONFIG_DM_SNAPSHOT=m
971CONFIG_DM_MIRROR=m
972# CONFIG_DM_RAID is not set
973CONFIG_DM_LOG_USERSPACE=m
974CONFIG_DM_ZERO=m
975CONFIG_DM_MULTIPATH=m
976CONFIG_DM_MULTIPATH_QL=m
977CONFIG_DM_MULTIPATH_ST=m
978CONFIG_DM_DELAY=m
979CONFIG_DM_UEVENT=y
980# CONFIG_DM_FLAKEY is not set
981# CONFIG_TARGET_CORE is not set
982# CONFIG_FUSION is not set
983
984#
985# IEEE 1394 (FireWire) support
986#
987# CONFIG_FIREWIRE is not set
988# CONFIG_FIREWIRE_NOSY is not set
989# CONFIG_I2O is not set
990CONFIG_NETDEVICES=y
991CONFIG_IFB=m
992CONFIG_DUMMY=m
993CONFIG_BONDING=m
994CONFIG_MACVLAN=m
995CONFIG_MACVTAP=m
996# CONFIG_EQUALIZER is not set
997CONFIG_TUN=y
998CONFIG_VETH=m
999# CONFIG_ARCNET is not set
1000# CONFIG_MII is not set
1001CONFIG_PHYLIB=y
1002
1003#
1004# MII PHY device drivers
1005#
1006# CONFIG_MARVELL_PHY is not set
1007# CONFIG_DAVICOM_PHY is not set
1008# CONFIG_QSEMI_PHY is not set
1009# CONFIG_LXT_PHY is not set
1010# CONFIG_CICADA_PHY is not set
1011# CONFIG_VITESSE_PHY is not set
1012# CONFIG_SMSC_PHY is not set
1013# CONFIG_BROADCOM_PHY is not set
1014# CONFIG_BCM63XX_PHY is not set
1015# CONFIG_ICPLUS_PHY is not set
1016# CONFIG_REALTEK_PHY is not set
1017# CONFIG_NATIONAL_PHY is not set
1018# CONFIG_STE10XP is not set
1019# CONFIG_LSI_ET1011C_PHY is not set
1020# CONFIG_MICREL_PHY is not set
1021# CONFIG_FIXED_PHY is not set
1022# CONFIG_MDIO_BITBANG is not set
1023# CONFIG_NET_ETHERNET is not set
1024CONFIG_NETDEV_1000=y
1025# CONFIG_ACENIC is not set
1026# CONFIG_DL2K is not set
1027# CONFIG_E1000 is not set
1028CONFIG_E1000E=m
1029# CONFIG_IP1000 is not set
1030# CONFIG_IGB is not set
1031# CONFIG_IGBVF is not set
1032# CONFIG_NS83820 is not set
1033# CONFIG_HAMACHI is not set
1034# CONFIG_YELLOWFIN is not set
1035# CONFIG_R8169 is not set
1036# CONFIG_SIS190 is not set
1037# CONFIG_SKGE is not set
1038# CONFIG_SKY2 is not set
1039# CONFIG_VIA_VELOCITY is not set
1040# CONFIG_TIGON3 is not set
1041# CONFIG_BNX2 is not set
1042# CONFIG_CNIC is not set
1043# CONFIG_QLA3XXX is not set
1044# CONFIG_ATL1 is not set
1045# CONFIG_ATL1E is not set
1046# CONFIG_ATL1C is not set
1047# CONFIG_JME is not set
1048# CONFIG_STMMAC_ETH is not set
1049# CONFIG_PCH_GBE is not set
1050# CONFIG_NETDEV_10000 is not set
1051# CONFIG_TR is not set
1052# CONFIG_WLAN is not set
1053
1054#
1055# Enable WiMAX (Networking options) to see the WiMAX drivers
1056#
1057# CONFIG_WAN is not set
1058
1059#
1060# CAIF transport drivers
1061#
1062# CONFIG_TILE_NET is not set
1063# CONFIG_FDDI is not set
1064# CONFIG_HIPPI is not set
1065# CONFIG_PPP is not set
1066# CONFIG_SLIP is not set
1067# CONFIG_NET_FC is not set
1068# CONFIG_NETCONSOLE is not set
1069# CONFIG_NETPOLL is not set
1070# CONFIG_NET_POLL_CONTROLLER is not set
1071# CONFIG_VMXNET3 is not set
1072# CONFIG_ISDN is not set
1073# CONFIG_PHONE is not set
1074
1075#
1076# Input device support
1077#
1078CONFIG_INPUT=y
1079# CONFIG_INPUT_FF_MEMLESS is not set
1080# CONFIG_INPUT_POLLDEV is not set
1081# CONFIG_INPUT_SPARSEKMAP is not set
1082
1083#
1084# Userland interfaces
1085#
1086# CONFIG_INPUT_MOUSEDEV is not set
1087# CONFIG_INPUT_JOYDEV is not set
1088# CONFIG_INPUT_EVDEV is not set
1089# CONFIG_INPUT_EVBUG is not set
1090
1091#
1092# Input Device Drivers
1093#
1094# CONFIG_INPUT_KEYBOARD is not set
1095# CONFIG_INPUT_MOUSE is not set
1096# CONFIG_INPUT_JOYSTICK is not set
1097# CONFIG_INPUT_TABLET is not set
1098# CONFIG_INPUT_TOUCHSCREEN is not set
1099# CONFIG_INPUT_MISC is not set
1100
1101#
1102# Hardware I/O ports
1103#
1104# CONFIG_SERIO is not set
1105# CONFIG_GAMEPORT is not set
1106
1107#
1108# Character devices
1109#
1110# CONFIG_VT is not set
1111CONFIG_UNIX98_PTYS=y
1112# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
1113# CONFIG_LEGACY_PTYS is not set
1114# CONFIG_SERIAL_NONSTANDARD is not set
1115# CONFIG_NOZOMI is not set
1116# CONFIG_N_GSM is not set
1117CONFIG_DEVKMEM=y
1118
1119#
1120# Serial drivers
1121#
1122# CONFIG_SERIAL_8250 is not set
1123
1124#
1125# Non-8250 serial port support
1126#
1127# CONFIG_SERIAL_MFD_HSU is not set
1128# CONFIG_SERIAL_JSM is not set
1129# CONFIG_SERIAL_TIMBERDALE is not set
1130# CONFIG_SERIAL_ALTERA_JTAGUART is not set
1131# CONFIG_SERIAL_ALTERA_UART is not set
1132# CONFIG_SERIAL_PCH_UART is not set
1133# CONFIG_TTY_PRINTK is not set
1134CONFIG_HVC_DRIVER=y
1135# CONFIG_IPMI_HANDLER is not set
1136CONFIG_HW_RANDOM=y
1137CONFIG_HW_RANDOM_TIMERIOMEM=m
1138# CONFIG_R3964 is not set
1139# CONFIG_APPLICOM is not set
1140
1141#
1142# PCMCIA character devices
1143#
1144# CONFIG_RAW_DRIVER is not set
1145# CONFIG_TCG_TPM is not set
1146CONFIG_DEVPORT=y
1147# CONFIG_RAMOOPS is not set
1148CONFIG_I2C=y
1149CONFIG_I2C_BOARDINFO=y
1150CONFIG_I2C_COMPAT=y
1151CONFIG_I2C_CHARDEV=y
1152# CONFIG_I2C_MUX is not set
1153CONFIG_I2C_HELPER_AUTO=y
1154
1155#
1156# I2C Hardware Bus support
1157#
1158
1159#
1160# PC SMBus host controller drivers
1161#
1162# CONFIG_I2C_ALI1535 is not set
1163# CONFIG_I2C_ALI1563 is not set
1164# CONFIG_I2C_ALI15X3 is not set
1165# CONFIG_I2C_AMD756 is not set
1166# CONFIG_I2C_AMD8111 is not set
1167# CONFIG_I2C_I801 is not set
1168# CONFIG_I2C_ISCH is not set
1169# CONFIG_I2C_PIIX4 is not set
1170# CONFIG_I2C_NFORCE2 is not set
1171# CONFIG_I2C_SIS5595 is not set
1172# CONFIG_I2C_SIS630 is not set
1173# CONFIG_I2C_SIS96X is not set
1174# CONFIG_I2C_VIA is not set
1175# CONFIG_I2C_VIAPRO is not set
1176
1177#
1178# I2C system bus drivers (mostly embedded / system-on-chip)
1179#
1180# CONFIG_I2C_INTEL_MID is not set
1181# CONFIG_I2C_OCORES is not set
1182# CONFIG_I2C_PCA_PLATFORM is not set
1183# CONFIG_I2C_PXA_PCI is not set
1184# CONFIG_I2C_SIMTEC is not set
1185# CONFIG_I2C_XILINX is not set
1186# CONFIG_I2C_EG20T is not set
1187
1188#
1189# External I2C/SMBus adapter drivers
1190#
1191# CONFIG_I2C_PARPORT_LIGHT is not set
1192# CONFIG_I2C_TAOS_EVM is not set
1193
1194#
1195# Other I2C/SMBus bus drivers
1196#
1197# CONFIG_I2C_STUB is not set
1198# CONFIG_I2C_DEBUG_CORE is not set
1199# CONFIG_I2C_DEBUG_ALGO is not set
1200# CONFIG_I2C_DEBUG_BUS is not set
1201# CONFIG_SPI is not set
1202
1203#
1204# PPS support
1205#
1206# CONFIG_PPS is not set
1207
1208#
1209# PPS generators support
1210#
1211# CONFIG_W1 is not set
1212# CONFIG_POWER_SUPPLY is not set
1213# CONFIG_HWMON is not set
1214# CONFIG_THERMAL is not set
1215# CONFIG_WATCHDOG is not set
1216CONFIG_SSB_POSSIBLE=y
1217
1218#
1219# Sonics Silicon Backplane
1220#
1221# CONFIG_SSB is not set
1222CONFIG_MFD_SUPPORT=y
1223# CONFIG_MFD_CORE is not set
1224# CONFIG_MFD_88PM860X is not set
1225# CONFIG_MFD_SM501 is not set
1226# CONFIG_HTC_PASIC3 is not set
1227# CONFIG_TPS6105X is not set
1228# CONFIG_TPS6507X is not set
1229# CONFIG_TWL4030_CORE is not set
1230# CONFIG_MFD_STMPE is not set
1231# CONFIG_MFD_TC3589X is not set
1232# CONFIG_MFD_TMIO is not set
1233# CONFIG_PMIC_DA903X is not set
1234# CONFIG_PMIC_ADP5520 is not set
1235# CONFIG_MFD_MAX8925 is not set
1236# CONFIG_MFD_MAX8997 is not set
1237# CONFIG_MFD_MAX8998 is not set
1238# CONFIG_MFD_WM8400 is not set
1239# CONFIG_MFD_WM831X_I2C is not set
1240# CONFIG_MFD_WM8350_I2C is not set
1241# CONFIG_MFD_WM8994 is not set
1242# CONFIG_MFD_PCF50633 is not set
1243# CONFIG_ABX500_CORE is not set
1244# CONFIG_LPC_SCH is not set
1245# CONFIG_MFD_RDC321X is not set
1246# CONFIG_MFD_JANZ_CMODIO is not set
1247# CONFIG_MFD_VX855 is not set
1248# CONFIG_MFD_WL1273_CORE is not set
1249# CONFIG_REGULATOR is not set
1250# CONFIG_MEDIA_SUPPORT is not set
1251
1252#
1253# Graphics support
1254#
1255# CONFIG_VGA_ARB is not set
1256# CONFIG_DRM is not set
1257# CONFIG_STUB_POULSBO is not set
1258# CONFIG_VGASTATE is not set
1259# CONFIG_VIDEO_OUTPUT_CONTROL is not set
1260# CONFIG_FB is not set
1261# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
1262
1263#
1264# Display device support
1265#
1266# CONFIG_DISPLAY_SUPPORT is not set
1267# CONFIG_SOUND is not set
1268# CONFIG_HID_SUPPORT is not set
1269# CONFIG_USB_SUPPORT is not set
1270# CONFIG_UWB is not set
1271# CONFIG_MMC is not set
1272# CONFIG_MEMSTICK is not set
1273# CONFIG_NEW_LEDS is not set
1274# CONFIG_NFC_DEVICES is not set
1275# CONFIG_ACCESSIBILITY is not set
1276# CONFIG_INFINIBAND is not set
1277# CONFIG_EDAC is not set
1278CONFIG_RTC_LIB=y
1279CONFIG_RTC_CLASS=y
1280CONFIG_RTC_HCTOSYS=y
1281CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
1282# CONFIG_RTC_DEBUG is not set
1283
1284#
1285# RTC interfaces
1286#
1287CONFIG_RTC_INTF_SYSFS=y
1288CONFIG_RTC_INTF_PROC=y
1289CONFIG_RTC_INTF_DEV=y
1290# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
1291# CONFIG_RTC_DRV_TEST is not set
1292
1293#
1294# I2C RTC drivers
1295#
1296# CONFIG_RTC_DRV_DS1307 is not set
1297# CONFIG_RTC_DRV_DS1374 is not set
1298# CONFIG_RTC_DRV_DS1672 is not set
1299# CONFIG_RTC_DRV_DS3232 is not set
1300# CONFIG_RTC_DRV_MAX6900 is not set
1301# CONFIG_RTC_DRV_RS5C372 is not set
1302# CONFIG_RTC_DRV_ISL1208 is not set
1303# CONFIG_RTC_DRV_ISL12022 is not set
1304# CONFIG_RTC_DRV_X1205 is not set
1305# CONFIG_RTC_DRV_PCF8563 is not set
1306# CONFIG_RTC_DRV_PCF8583 is not set
1307# CONFIG_RTC_DRV_M41T80 is not set
1308# CONFIG_RTC_DRV_BQ32K is not set
1309# CONFIG_RTC_DRV_S35390A is not set
1310# CONFIG_RTC_DRV_FM3130 is not set
1311# CONFIG_RTC_DRV_RX8581 is not set
1312# CONFIG_RTC_DRV_RX8025 is not set
1313
1314#
1315# SPI RTC drivers
1316#
1317
1318#
1319# Platform RTC drivers
1320#
1321# CONFIG_RTC_DRV_DS1286 is not set
1322# CONFIG_RTC_DRV_DS1511 is not set
1323# CONFIG_RTC_DRV_DS1553 is not set
1324# CONFIG_RTC_DRV_DS1742 is not set
1325# CONFIG_RTC_DRV_STK17TA8 is not set
1326# CONFIG_RTC_DRV_M48T86 is not set
1327# CONFIG_RTC_DRV_M48T35 is not set
1328# CONFIG_RTC_DRV_M48T59 is not set
1329# CONFIG_RTC_DRV_MSM6242 is not set
1330# CONFIG_RTC_DRV_BQ4802 is not set
1331# CONFIG_RTC_DRV_RP5C01 is not set
1332# CONFIG_RTC_DRV_V3020 is not set
1333
1334#
1335# on-CPU RTC drivers
1336#
1337CONFIG_RTC_DRV_TILE=y
1338# CONFIG_DMADEVICES is not set
1339# CONFIG_AUXDISPLAY is not set
1340# CONFIG_UIO is not set
1341# CONFIG_STAGING is not set
1342
1343#
1344# File systems
1345#
1346CONFIG_EXT2_FS=y
1347CONFIG_EXT2_FS_XATTR=y
1348CONFIG_EXT2_FS_POSIX_ACL=y
1349CONFIG_EXT2_FS_SECURITY=y
1350CONFIG_EXT2_FS_XIP=y
1351CONFIG_EXT3_FS=y
1352CONFIG_EXT3_DEFAULTS_TO_ORDERED=y
1353CONFIG_EXT3_FS_XATTR=y
1354CONFIG_EXT3_FS_POSIX_ACL=y
1355CONFIG_EXT3_FS_SECURITY=y
1356CONFIG_EXT4_FS=y
1357CONFIG_EXT4_FS_XATTR=y
1358CONFIG_EXT4_FS_POSIX_ACL=y
1359CONFIG_EXT4_FS_SECURITY=y
1360# CONFIG_EXT4_DEBUG is not set
1361CONFIG_FS_XIP=y
1362CONFIG_JBD=y
1363# CONFIG_JBD_DEBUG is not set
1364CONFIG_JBD2=y
1365CONFIG_JBD2_DEBUG=y
1366CONFIG_FS_MBCACHE=y
1367# CONFIG_REISERFS_FS is not set
1368# CONFIG_JFS_FS is not set
1369CONFIG_XFS_FS=m
1370CONFIG_XFS_QUOTA=y
1371CONFIG_XFS_POSIX_ACL=y
1372# CONFIG_XFS_RT is not set
1373# CONFIG_XFS_DEBUG is not set
1374CONFIG_GFS2_FS=m
1375CONFIG_GFS2_FS_LOCKING_DLM=y
1376# CONFIG_OCFS2_FS is not set
1377CONFIG_BTRFS_FS=m
1378CONFIG_BTRFS_FS_POSIX_ACL=y
1379# CONFIG_NILFS2_FS is not set
1380CONFIG_FS_POSIX_ACL=y
1381CONFIG_EXPORTFS=y
1382CONFIG_FILE_LOCKING=y
1383CONFIG_FSNOTIFY=y
1384CONFIG_DNOTIFY=y
1385CONFIG_INOTIFY_USER=y
1386# CONFIG_FANOTIFY is not set
1387CONFIG_QUOTA=y
1388CONFIG_QUOTA_NETLINK_INTERFACE=y
1389# CONFIG_PRINT_QUOTA_WARNING is not set
1390# CONFIG_QUOTA_DEBUG is not set
1391CONFIG_QUOTA_TREE=y
1392# CONFIG_QFMT_V1 is not set
1393CONFIG_QFMT_V2=y
1394CONFIG_QUOTACTL=y
1395# CONFIG_AUTOFS4_FS is not set
1396CONFIG_FUSE_FS=y
1397CONFIG_CUSE=m
1398CONFIG_GENERIC_ACL=y
1399
1400#
1401# Caches
1402#
1403CONFIG_FSCACHE=m
1404CONFIG_FSCACHE_STATS=y
1405# CONFIG_FSCACHE_HISTOGRAM is not set
1406# CONFIG_FSCACHE_DEBUG is not set
1407# CONFIG_FSCACHE_OBJECT_LIST is not set
1408CONFIG_CACHEFILES=m
1409# CONFIG_CACHEFILES_DEBUG is not set
1410# CONFIG_CACHEFILES_HISTOGRAM is not set
1411
1412#
1413# CD-ROM/DVD Filesystems
1414#
1415CONFIG_ISO9660_FS=m
1416CONFIG_JOLIET=y
1417CONFIG_ZISOFS=y
1418CONFIG_UDF_FS=m
1419CONFIG_UDF_NLS=y
1420
1421#
1422# DOS/FAT/NT Filesystems
1423#
1424CONFIG_FAT_FS=m
1425CONFIG_MSDOS_FS=m
1426CONFIG_VFAT_FS=m
1427CONFIG_FAT_DEFAULT_CODEPAGE=437
1428CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
1429# CONFIG_NTFS_FS is not set
1430
1431#
1432# Pseudo filesystems
1433#
1434CONFIG_PROC_FS=y
1435CONFIG_PROC_KCORE=y
1436CONFIG_PROC_SYSCTL=y
1437CONFIG_PROC_PAGE_MONITOR=y
1438CONFIG_SYSFS=y
1439CONFIG_TMPFS=y
1440CONFIG_TMPFS_POSIX_ACL=y
1441CONFIG_HUGETLBFS=y
1442CONFIG_HUGETLB_PAGE=y
1443CONFIG_CONFIGFS_FS=m
1444CONFIG_MISC_FILESYSTEMS=y
1445# CONFIG_ADFS_FS is not set
1446# CONFIG_AFFS_FS is not set
1447CONFIG_ECRYPT_FS=m
1448# CONFIG_HFS_FS is not set
1449# CONFIG_HFSPLUS_FS is not set
1450# CONFIG_BEFS_FS is not set
1451# CONFIG_BFS_FS is not set
1452# CONFIG_EFS_FS is not set
1453# CONFIG_LOGFS is not set
1454CONFIG_CRAMFS=m
1455CONFIG_SQUASHFS=m
1456# CONFIG_SQUASHFS_XATTR is not set
1457# CONFIG_SQUASHFS_LZO is not set
1458# CONFIG_SQUASHFS_XZ is not set
1459# CONFIG_SQUASHFS_EMBEDDED is not set
1460CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
1461# CONFIG_VXFS_FS is not set
1462# CONFIG_MINIX_FS is not set
1463# CONFIG_OMFS_FS is not set
1464# CONFIG_HPFS_FS is not set
1465# CONFIG_QNX4FS_FS is not set
1466# CONFIG_ROMFS_FS is not set
1467# CONFIG_PSTORE is not set
1468# CONFIG_SYSV_FS is not set
1469# CONFIG_UFS_FS is not set
1470CONFIG_NETWORK_FILESYSTEMS=y
1471CONFIG_NFS_FS=m
1472CONFIG_NFS_V3=y
1473CONFIG_NFS_V3_ACL=y
1474CONFIG_NFS_V4=y
1475CONFIG_NFS_V4_1=y
1476CONFIG_PNFS_FILE_LAYOUT=m
1477CONFIG_NFS_FSCACHE=y
1478# CONFIG_NFS_USE_LEGACY_DNS is not set
1479CONFIG_NFS_USE_KERNEL_DNS=y
1480# CONFIG_NFS_USE_NEW_IDMAPPER is not set
1481CONFIG_NFSD=m
1482CONFIG_NFSD_DEPRECATED=y
1483CONFIG_NFSD_V2_ACL=y
1484CONFIG_NFSD_V3=y
1485CONFIG_NFSD_V3_ACL=y
1486CONFIG_NFSD_V4=y
1487CONFIG_LOCKD=m
1488CONFIG_LOCKD_V4=y
1489CONFIG_NFS_ACL_SUPPORT=m
1490CONFIG_NFS_COMMON=y
1491CONFIG_SUNRPC=m
1492CONFIG_SUNRPC_GSS=m
1493CONFIG_RPCSEC_GSS_KRB5=m
1494# CONFIG_CEPH_FS is not set
1495CONFIG_CIFS=m
1496CONFIG_CIFS_STATS=y
1497# CONFIG_CIFS_STATS2 is not set
1498CONFIG_CIFS_WEAK_PW_HASH=y
1499CONFIG_CIFS_UPCALL=y
1500CONFIG_CIFS_XATTR=y
1501CONFIG_CIFS_POSIX=y
1502# CONFIG_CIFS_DEBUG2 is not set
1503CONFIG_CIFS_DFS_UPCALL=y
1504CONFIG_CIFS_FSCACHE=y
1505# CONFIG_CIFS_ACL is not set
1506CONFIG_CIFS_EXPERIMENTAL=y
1507# CONFIG_NCP_FS is not set
1508# CONFIG_CODA_FS is not set
1509# CONFIG_AFS_FS is not set
1510
1511#
1512# Partition Types
1513#
1514CONFIG_PARTITION_ADVANCED=y
1515# CONFIG_ACORN_PARTITION is not set
1516CONFIG_OSF_PARTITION=y
1517CONFIG_AMIGA_PARTITION=y
1518# CONFIG_ATARI_PARTITION is not set
1519CONFIG_MAC_PARTITION=y
1520CONFIG_MSDOS_PARTITION=y
1521CONFIG_BSD_DISKLABEL=y
1522CONFIG_MINIX_SUBPARTITION=y
1523CONFIG_SOLARIS_X86_PARTITION=y
1524CONFIG_UNIXWARE_DISKLABEL=y
1525# CONFIG_LDM_PARTITION is not set
1526CONFIG_SGI_PARTITION=y
1527# CONFIG_ULTRIX_PARTITION is not set
1528CONFIG_SUN_PARTITION=y
1529CONFIG_KARMA_PARTITION=y
1530CONFIG_EFI_PARTITION=y
1531# CONFIG_SYSV68_PARTITION is not set
1532CONFIG_NLS=y
1533CONFIG_NLS_DEFAULT="utf8"
1534CONFIG_NLS_CODEPAGE_437=y
1535CONFIG_NLS_CODEPAGE_737=m
1536CONFIG_NLS_CODEPAGE_775=m
1537CONFIG_NLS_CODEPAGE_850=m
1538CONFIG_NLS_CODEPAGE_852=m
1539CONFIG_NLS_CODEPAGE_855=m
1540CONFIG_NLS_CODEPAGE_857=m
1541CONFIG_NLS_CODEPAGE_860=m
1542CONFIG_NLS_CODEPAGE_861=m
1543CONFIG_NLS_CODEPAGE_862=m
1544CONFIG_NLS_CODEPAGE_863=m
1545CONFIG_NLS_CODEPAGE_864=m
1546CONFIG_NLS_CODEPAGE_865=m
1547CONFIG_NLS_CODEPAGE_866=m
1548CONFIG_NLS_CODEPAGE_869=m
1549CONFIG_NLS_CODEPAGE_936=m
1550CONFIG_NLS_CODEPAGE_950=m
1551CONFIG_NLS_CODEPAGE_932=m
1552CONFIG_NLS_CODEPAGE_949=m
1553CONFIG_NLS_CODEPAGE_874=m
1554CONFIG_NLS_ISO8859_8=m
1555CONFIG_NLS_CODEPAGE_1250=m
1556CONFIG_NLS_CODEPAGE_1251=m
1557CONFIG_NLS_ASCII=y
1558CONFIG_NLS_ISO8859_1=m
1559CONFIG_NLS_ISO8859_2=m
1560CONFIG_NLS_ISO8859_3=m
1561CONFIG_NLS_ISO8859_4=m
1562CONFIG_NLS_ISO8859_5=m
1563CONFIG_NLS_ISO8859_6=m
1564CONFIG_NLS_ISO8859_7=m
1565CONFIG_NLS_ISO8859_9=m
1566CONFIG_NLS_ISO8859_13=m
1567CONFIG_NLS_ISO8859_14=m
1568CONFIG_NLS_ISO8859_15=m
1569CONFIG_NLS_KOI8_R=m
1570CONFIG_NLS_KOI8_U=m
1571CONFIG_NLS_UTF8=m
1572CONFIG_DLM=m
1573CONFIG_DLM_DEBUG=y
1574
1575#
1576# Kernel hacking
1577#
1578# CONFIG_PRINTK_TIME is not set
1579CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
1580# CONFIG_ENABLE_WARN_DEPRECATED is not set
1581CONFIG_ENABLE_MUST_CHECK=y
1582CONFIG_FRAME_WARN=2048
1583CONFIG_MAGIC_SYSRQ=y
1584CONFIG_STRIP_ASM_SYMS=y
1585# CONFIG_UNUSED_SYMBOLS is not set
1586CONFIG_DEBUG_FS=y
1587CONFIG_HEADERS_CHECK=y
1588# CONFIG_DEBUG_SECTION_MISMATCH is not set
1589CONFIG_DEBUG_KERNEL=y
1590CONFIG_DEBUG_SHIRQ=y
1591CONFIG_LOCKUP_DETECTOR=y
1592# CONFIG_HARDLOCKUP_DETECTOR is not set
1593# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set
1594CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0
1595# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1596CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1597CONFIG_DETECT_HUNG_TASK=y
1598# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1599CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1600CONFIG_SCHED_DEBUG=y
1601CONFIG_SCHEDSTATS=y
1602CONFIG_TIMER_STATS=y
1603# CONFIG_DEBUG_OBJECTS is not set
1604# CONFIG_SLUB_DEBUG_ON is not set
1605# CONFIG_SLUB_STATS is not set
1606# CONFIG_DEBUG_KMEMLEAK is not set
1607# CONFIG_DEBUG_RT_MUTEXES is not set
1608# CONFIG_RT_MUTEX_TESTER is not set
1609# CONFIG_DEBUG_SPINLOCK is not set
1610# CONFIG_DEBUG_MUTEXES is not set
1611# CONFIG_DEBUG_LOCK_ALLOC is not set
1612# CONFIG_PROVE_LOCKING is not set
1613# CONFIG_SPARSE_RCU_POINTER is not set
1614# CONFIG_LOCK_STAT is not set
1615CONFIG_DEBUG_SPINLOCK_SLEEP=y
1616# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1617CONFIG_STACKTRACE=y
1618# CONFIG_DEBUG_KOBJECT is not set
1619CONFIG_DEBUG_INFO=y
1620CONFIG_DEBUG_INFO_REDUCED=y
1621CONFIG_DEBUG_VM=y
1622# CONFIG_DEBUG_WRITECOUNT is not set
1623CONFIG_DEBUG_MEMORY_INIT=y
1624CONFIG_DEBUG_LIST=y
1625# CONFIG_TEST_LIST_SORT is not set
1626# CONFIG_DEBUG_SG is not set
1627# CONFIG_DEBUG_NOTIFIERS is not set
1628CONFIG_DEBUG_CREDENTIALS=y
1629# CONFIG_RCU_TORTURE_TEST is not set
1630# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1631# CONFIG_BACKTRACE_SELF_TEST is not set
1632# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1633CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
1634# CONFIG_LKDTM is not set
1635# CONFIG_FAULT_INJECTION is not set
1636# CONFIG_SYSCTL_SYSCALL_CHECK is not set
1637# CONFIG_DEBUG_PAGEALLOC is not set
1638CONFIG_TRACING_SUPPORT=y
1639CONFIG_FTRACE=y
1640# CONFIG_IRQSOFF_TRACER is not set
1641# CONFIG_SCHED_TRACER is not set
1642# CONFIG_ENABLE_DEFAULT_TRACERS is not set
1643CONFIG_BRANCH_PROFILE_NONE=y
1644# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1645# CONFIG_PROFILE_ALL_BRANCHES is not set
1646# CONFIG_BLK_DEV_IO_TRACE is not set
1647# CONFIG_BUILD_DOCSRC is not set
1648CONFIG_DYNAMIC_DEBUG=y
1649# CONFIG_ATOMIC64_SELFTEST is not set
1650CONFIG_ASYNC_RAID6_TEST=m
1651# CONFIG_SAMPLES is not set
1652# CONFIG_TEST_KSTRTOX is not set
1653CONFIG_EARLY_PRINTK=y
1654CONFIG_DEBUG_STACKOVERFLOW=y
1655# CONFIG_DEBUG_STACK_USAGE is not set
1656CONFIG_DEBUG_EXTRA_FLAGS=""
1657
1658#
1659# Security options
1660#
1661CONFIG_KEYS=y
1662CONFIG_KEYS_DEBUG_PROC_KEYS=y
1663# CONFIG_SECURITY_DMESG_RESTRICT is not set
1664CONFIG_SECURITY=y
1665CONFIG_SECURITYFS=y
1666CONFIG_SECURITY_NETWORK=y
1667CONFIG_SECURITY_NETWORK_XFRM=y
1668# CONFIG_SECURITY_PATH is not set
1669CONFIG_LSM_MMAP_MIN_ADDR=65536
1670CONFIG_SECURITY_SELINUX=y
1671CONFIG_SECURITY_SELINUX_BOOTPARAM=y
1672CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1
1673CONFIG_SECURITY_SELINUX_DISABLE=y
1674CONFIG_SECURITY_SELINUX_DEVELOP=y
1675CONFIG_SECURITY_SELINUX_AVC_STATS=y
1676CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
1677# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
1678# CONFIG_SECURITY_SMACK is not set
1679# CONFIG_SECURITY_TOMOYO is not set
1680# CONFIG_SECURITY_APPARMOR is not set
1681# CONFIG_IMA is not set
1682CONFIG_DEFAULT_SECURITY_SELINUX=y
1683# CONFIG_DEFAULT_SECURITY_DAC is not set
1684CONFIG_DEFAULT_SECURITY="selinux"
1685CONFIG_XOR_BLOCKS=m
1686CONFIG_ASYNC_CORE=m
1687CONFIG_ASYNC_MEMCPY=m
1688CONFIG_ASYNC_XOR=m
1689CONFIG_ASYNC_PQ=m
1690CONFIG_ASYNC_RAID6_RECOV=m
1691CONFIG_CRYPTO=y
1692
1693#
1694# Crypto core or helper
1695#
1696CONFIG_CRYPTO_ALGAPI=y
1697CONFIG_CRYPTO_ALGAPI2=y
1698CONFIG_CRYPTO_AEAD=m
1699CONFIG_CRYPTO_AEAD2=y
1700CONFIG_CRYPTO_BLKCIPHER=m
1701CONFIG_CRYPTO_BLKCIPHER2=y
1702CONFIG_CRYPTO_HASH=y
1703CONFIG_CRYPTO_HASH2=y
1704CONFIG_CRYPTO_RNG=m
1705CONFIG_CRYPTO_RNG2=y
1706CONFIG_CRYPTO_PCOMP=m
1707CONFIG_CRYPTO_PCOMP2=y
1708CONFIG_CRYPTO_MANAGER=y
1709CONFIG_CRYPTO_MANAGER2=y
1710CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
1711CONFIG_CRYPTO_GF128MUL=m
1712CONFIG_CRYPTO_NULL=m
1713CONFIG_CRYPTO_PCRYPT=m
1714CONFIG_CRYPTO_WORKQUEUE=y
1715CONFIG_CRYPTO_CRYPTD=m
1716CONFIG_CRYPTO_AUTHENC=m
1717CONFIG_CRYPTO_TEST=m
1718
1719#
1720# Authenticated Encryption with Associated Data
1721#
1722CONFIG_CRYPTO_CCM=m
1723CONFIG_CRYPTO_GCM=m
1724CONFIG_CRYPTO_SEQIV=m
1725
1726#
1727# Block modes
1728#
1729CONFIG_CRYPTO_CBC=m
1730CONFIG_CRYPTO_CTR=m
1731CONFIG_CRYPTO_CTS=m
1732CONFIG_CRYPTO_ECB=m
1733CONFIG_CRYPTO_LRW=m
1734CONFIG_CRYPTO_PCBC=m
1735CONFIG_CRYPTO_XTS=m
1736
1737#
1738# Hash modes
1739#
1740CONFIG_CRYPTO_HMAC=y
1741CONFIG_CRYPTO_XCBC=m
1742CONFIG_CRYPTO_VMAC=m
1743
1744#
1745# Digest
1746#
1747CONFIG_CRYPTO_CRC32C=y
1748CONFIG_CRYPTO_GHASH=m
1749CONFIG_CRYPTO_MD4=m
1750CONFIG_CRYPTO_MD5=y
1751CONFIG_CRYPTO_MICHAEL_MIC=m
1752CONFIG_CRYPTO_RMD128=m
1753CONFIG_CRYPTO_RMD160=m
1754CONFIG_CRYPTO_RMD256=m
1755CONFIG_CRYPTO_RMD320=m
1756CONFIG_CRYPTO_SHA1=y
1757CONFIG_CRYPTO_SHA256=m
1758CONFIG_CRYPTO_SHA512=m
1759CONFIG_CRYPTO_TGR192=m
1760CONFIG_CRYPTO_WP512=m
1761
1762#
1763# Ciphers
1764#
1765CONFIG_CRYPTO_AES=m
1766CONFIG_CRYPTO_ANUBIS=m
1767CONFIG_CRYPTO_ARC4=m
1768CONFIG_CRYPTO_BLOWFISH=m
1769CONFIG_CRYPTO_CAMELLIA=m
1770CONFIG_CRYPTO_CAST5=m
1771CONFIG_CRYPTO_CAST6=m
1772CONFIG_CRYPTO_DES=m
1773CONFIG_CRYPTO_FCRYPT=m
1774CONFIG_CRYPTO_KHAZAD=m
1775# CONFIG_CRYPTO_SALSA20 is not set
1776CONFIG_CRYPTO_SEED=m
1777CONFIG_CRYPTO_SERPENT=m
1778CONFIG_CRYPTO_TEA=m
1779CONFIG_CRYPTO_TWOFISH=m
1780CONFIG_CRYPTO_TWOFISH_COMMON=m
1781
1782#
1783# Compression
1784#
1785CONFIG_CRYPTO_DEFLATE=m
1786CONFIG_CRYPTO_ZLIB=m
1787CONFIG_CRYPTO_LZO=m
1788
1789#
1790# Random Number Generation
1791#
1792CONFIG_CRYPTO_ANSI_CPRNG=m
1793# CONFIG_CRYPTO_USER_API_HASH is not set
1794# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
1795CONFIG_CRYPTO_HW=y
1796CONFIG_CRYPTO_DEV_HIFN_795X=m
1797CONFIG_CRYPTO_DEV_HIFN_795X_RNG=y
1798# CONFIG_BINARY_PRINTF is not set
1799
1800#
1801# Library routines
1802#
1803CONFIG_RAID6_PQ=m
1804CONFIG_BITREVERSE=y
1805CONFIG_GENERIC_FIND_FIRST_BIT=y
1806CONFIG_GENERIC_FIND_NEXT_BIT=y
1807CONFIG_GENERIC_FIND_LAST_BIT=y
1808# CONFIG_CRC_CCITT is not set
1809CONFIG_CRC16=y
1810CONFIG_CRC_T10DIF=y
1811CONFIG_CRC_ITU_T=m
1812CONFIG_CRC32=y
1813# CONFIG_CRC7 is not set
1814CONFIG_LIBCRC32C=m
1815CONFIG_AUDIT_GENERIC=y
1816CONFIG_ZLIB_INFLATE=y
1817CONFIG_ZLIB_DEFLATE=m
1818CONFIG_LZO_COMPRESS=m
1819CONFIG_LZO_DECOMPRESS=m
1820# CONFIG_XZ_DEC is not set
1821# CONFIG_XZ_DEC_BCJ is not set
1822CONFIG_DECOMPRESS_GZIP=y
1823CONFIG_TEXTSEARCH=y
1824CONFIG_TEXTSEARCH_KMP=m
1825CONFIG_TEXTSEARCH_BM=m
1826CONFIG_TEXTSEARCH_FSM=m
1827CONFIG_HAS_IOMEM=y
1828CONFIG_HAS_IOPORT=y
1829CONFIG_HAS_DMA=y
1830CONFIG_CPU_RMAP=y
1831CONFIG_NLATTR=y
1832# CONFIG_AVERAGE is not set
1833# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
new file mode 100644
index 000000000000..f58dc362b944
--- /dev/null
+++ b/arch/tile/configs/tilepro_defconfig
@@ -0,0 +1,1163 @@
1#
2# Automatically generated make config: don't edit
3# Linux/tile 2.6.39-rc5 Kernel Configuration
4# Tue May 3 09:15:02 2011
5#
6CONFIG_TILE=y
7CONFIG_MMU=y
8CONFIG_GENERIC_CSUM=y
9CONFIG_SEMAPHORE_SLEEPERS=y
10CONFIG_HAVE_ARCH_ALLOC_REMAP=y
11CONFIG_HAVE_SETUP_PER_CPU_AREA=y
12CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
13CONFIG_SYS_SUPPORTS_HUGETLBFS=y
14CONFIG_GENERIC_TIME=y
15CONFIG_GENERIC_CLOCKEVENTS=y
16CONFIG_RWSEM_GENERIC_SPINLOCK=y
17CONFIG_DEFAULT_MIGRATION_COST=10000000
18CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
19CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
20CONFIG_ARCH_DMA_ADDR_T_64BIT=y
21CONFIG_LOCKDEP_SUPPORT=y
22CONFIG_STACKTRACE_SUPPORT=y
23CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
24CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
25CONFIG_TRACE_IRQFLAGS_SUPPORT=y
26CONFIG_STRICT_DEVMEM=y
27CONFIG_SMP=y
28# CONFIG_DEBUG_COPY_FROM_USER is not set
29CONFIG_HVC_TILE=y
30# CONFIG_TILEGX is not set
31CONFIG_ARCH_DEFCONFIG="arch/tile/configs/tile_defconfig"
32CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
33CONFIG_CONSTRUCTORS=y
34
35#
36# General setup
37#
38CONFIG_EXPERIMENTAL=y
39CONFIG_INIT_ENV_ARG_LIMIT=32
40CONFIG_CROSS_COMPILE=""
41CONFIG_LOCALVERSION=""
42CONFIG_LOCALVERSION_AUTO=y
43# CONFIG_SWAP is not set
44CONFIG_SYSVIPC=y
45CONFIG_SYSVIPC_SYSCTL=y
46# CONFIG_POSIX_MQUEUE is not set
47# CONFIG_BSD_PROCESS_ACCT is not set
48CONFIG_FHANDLE=y
49# CONFIG_TASKSTATS is not set
50# CONFIG_AUDIT is not set
51CONFIG_HAVE_GENERIC_HARDIRQS=y
52
53#
54# IRQ subsystem
55#
56CONFIG_GENERIC_HARDIRQS=y
57CONFIG_GENERIC_IRQ_PROBE=y
58CONFIG_GENERIC_IRQ_SHOW=y
59CONFIG_GENERIC_PENDING_IRQ=y
60
61#
62# RCU Subsystem
63#
64CONFIG_TREE_RCU=y
65# CONFIG_PREEMPT_RCU is not set
66# CONFIG_RCU_TRACE is not set
67CONFIG_RCU_FANOUT=32
68# CONFIG_RCU_FANOUT_EXACT is not set
69# CONFIG_RCU_FAST_NO_HZ is not set
70# CONFIG_TREE_RCU_TRACE is not set
71# CONFIG_IKCONFIG is not set
72CONFIG_LOG_BUF_SHIFT=17
73# CONFIG_CGROUPS is not set
74# CONFIG_NAMESPACES is not set
75# CONFIG_SCHED_AUTOGROUP is not set
76# CONFIG_SYSFS_DEPRECATED is not set
77# CONFIG_RELAY is not set
78CONFIG_BLK_DEV_INITRD=y
79CONFIG_INITRAMFS_SOURCE="usr/contents.txt"
80CONFIG_INITRAMFS_ROOT_UID=0
81CONFIG_INITRAMFS_ROOT_GID=0
82CONFIG_RD_GZIP=y
83# CONFIG_RD_BZIP2 is not set
84# CONFIG_RD_LZMA is not set
85# CONFIG_RD_XZ is not set
86# CONFIG_RD_LZO is not set
87CONFIG_INITRAMFS_COMPRESSION_NONE=y
88# CONFIG_INITRAMFS_COMPRESSION_GZIP is not set
89CONFIG_CC_OPTIMIZE_FOR_SIZE=y
90CONFIG_SYSCTL=y
91CONFIG_ANON_INODES=y
92CONFIG_EXPERT=y
93CONFIG_SYSCTL_SYSCALL=y
94CONFIG_KALLSYMS=y
95# CONFIG_KALLSYMS_ALL is not set
96# CONFIG_KALLSYMS_EXTRA_PASS is not set
97CONFIG_HOTPLUG=y
98CONFIG_PRINTK=y
99CONFIG_BUG=y
100CONFIG_ELF_CORE=y
101CONFIG_BASE_FULL=y
102CONFIG_FUTEX=y
103CONFIG_EPOLL=y
104CONFIG_SIGNALFD=y
105CONFIG_TIMERFD=y
106CONFIG_EVENTFD=y
107CONFIG_SHMEM=y
108CONFIG_AIO=y
109CONFIG_EMBEDDED=y
110
111#
112# Kernel Performance Events And Counters
113#
114CONFIG_VM_EVENT_COUNTERS=y
115CONFIG_PCI_QUIRKS=y
116CONFIG_SLUB_DEBUG=y
117# CONFIG_COMPAT_BRK is not set
118# CONFIG_SLAB is not set
119CONFIG_SLUB=y
120# CONFIG_SLOB is not set
121CONFIG_PROFILING=y
122CONFIG_USE_GENERIC_SMP_HELPERS=y
123
124#
125# GCOV-based kernel profiling
126#
127# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
128CONFIG_SLABINFO=y
129CONFIG_RT_MUTEXES=y
130CONFIG_BASE_SMALL=0
131CONFIG_MODULES=y
132# CONFIG_MODULE_FORCE_LOAD is not set
133CONFIG_MODULE_UNLOAD=y
134# CONFIG_MODULE_FORCE_UNLOAD is not set
135# CONFIG_MODVERSIONS is not set
136# CONFIG_MODULE_SRCVERSION_ALL is not set
137CONFIG_STOP_MACHINE=y
138CONFIG_BLOCK=y
139CONFIG_LBDAF=y
140# CONFIG_BLK_DEV_BSG is not set
141# CONFIG_BLK_DEV_INTEGRITY is not set
142
143#
144# IO Schedulers
145#
146CONFIG_IOSCHED_NOOP=y
147# CONFIG_IOSCHED_DEADLINE is not set
148# CONFIG_IOSCHED_CFQ is not set
149CONFIG_DEFAULT_NOOP=y
150CONFIG_DEFAULT_IOSCHED="noop"
151# CONFIG_INLINE_SPIN_TRYLOCK is not set
152# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
153# CONFIG_INLINE_SPIN_LOCK is not set
154# CONFIG_INLINE_SPIN_LOCK_BH is not set
155# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
156# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
157CONFIG_INLINE_SPIN_UNLOCK=y
158# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
159CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
160# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
161# CONFIG_INLINE_READ_TRYLOCK is not set
162# CONFIG_INLINE_READ_LOCK is not set
163# CONFIG_INLINE_READ_LOCK_BH is not set
164# CONFIG_INLINE_READ_LOCK_IRQ is not set
165# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
166CONFIG_INLINE_READ_UNLOCK=y
167# CONFIG_INLINE_READ_UNLOCK_BH is not set
168CONFIG_INLINE_READ_UNLOCK_IRQ=y
169# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
170# CONFIG_INLINE_WRITE_TRYLOCK is not set
171# CONFIG_INLINE_WRITE_LOCK is not set
172# CONFIG_INLINE_WRITE_LOCK_BH is not set
173# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
174# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
175CONFIG_INLINE_WRITE_UNLOCK=y
176# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
177CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
178# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
179CONFIG_MUTEX_SPIN_ON_OWNER=y
180
181#
182# Tilera-specific configuration
183#
184CONFIG_NR_CPUS=64
185CONFIG_TICK_ONESHOT=y
186CONFIG_NO_HZ=y
187CONFIG_HIGH_RES_TIMERS=y
188CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
189CONFIG_HZ_100=y
190# CONFIG_HZ_250 is not set
191# CONFIG_HZ_300 is not set
192# CONFIG_HZ_1000 is not set
193CONFIG_HZ=100
194CONFIG_SCHED_HRTICK=y
195# CONFIG_KEXEC is not set
196CONFIG_HIGHMEM=y
197CONFIG_NUMA=y
198CONFIG_NODES_SHIFT=2
199# CONFIG_VMSPLIT_3_75G is not set
200# CONFIG_VMSPLIT_3_5G is not set
201CONFIG_VMSPLIT_3G=y
202# CONFIG_VMSPLIT_2_75G is not set
203# CONFIG_VMSPLIT_2_5G is not set
204# CONFIG_VMSPLIT_2_25G is not set
205# CONFIG_VMSPLIT_2G is not set
206# CONFIG_VMSPLIT_1G is not set
207CONFIG_PAGE_OFFSET=0xC0000000
208CONFIG_SELECT_MEMORY_MODEL=y
209CONFIG_DISCONTIGMEM_MANUAL=y
210CONFIG_DISCONTIGMEM=y
211CONFIG_FLAT_NODE_MEM_MAP=y
212CONFIG_NEED_MULTIPLE_NODES=y
213CONFIG_PAGEFLAGS_EXTENDED=y
214CONFIG_SPLIT_PTLOCK_CPUS=4
215# CONFIG_COMPACTION is not set
216CONFIG_MIGRATION=y
217CONFIG_PHYS_ADDR_T_64BIT=y
218CONFIG_ZONE_DMA_FLAG=0
219CONFIG_BOUNCE=y
220CONFIG_VIRT_TO_BUS=y
221# CONFIG_KSM is not set
222CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
223# CONFIG_CMDLINE_BOOL is not set
224CONFIG_VMALLOC_RESERVE=0x1000000
225CONFIG_HARDWALL=y
226CONFIG_KERNEL_PL=1
227
228#
229# Bus options
230#
231CONFIG_PCI=y
232CONFIG_PCI_DOMAINS=y
233# CONFIG_NO_IOMEM is not set
234# CONFIG_NO_IOPORT is not set
235# CONFIG_ARCH_SUPPORTS_MSI is not set
236# CONFIG_PCI_DEBUG is not set
237# CONFIG_PCI_STUB is not set
238# CONFIG_PCI_IOV is not set
239# CONFIG_HOTPLUG_PCI is not set
240
241#
242# Executable file formats
243#
244CONFIG_KCORE_ELF=y
245CONFIG_BINFMT_ELF=y
246# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
247# CONFIG_HAVE_AOUT is not set
248# CONFIG_BINFMT_MISC is not set
249CONFIG_NET=y
250
251#
252# Networking options
253#
254CONFIG_PACKET=y
255CONFIG_UNIX=y
256CONFIG_XFRM=y
257# CONFIG_XFRM_USER is not set
258# CONFIG_XFRM_SUB_POLICY is not set
259# CONFIG_XFRM_MIGRATE is not set
260# CONFIG_XFRM_STATISTICS is not set
261# CONFIG_NET_KEY is not set
262CONFIG_INET=y
263CONFIG_IP_MULTICAST=y
264# CONFIG_IP_ADVANCED_ROUTER is not set
265# CONFIG_IP_PNP is not set
266# CONFIG_NET_IPIP is not set
267# CONFIG_NET_IPGRE_DEMUX is not set
268# CONFIG_IP_MROUTE is not set
269# CONFIG_ARPD is not set
270# CONFIG_SYN_COOKIES is not set
271# CONFIG_INET_AH is not set
272# CONFIG_INET_ESP is not set
273# CONFIG_INET_IPCOMP is not set
274# CONFIG_INET_XFRM_TUNNEL is not set
275CONFIG_INET_TUNNEL=y
276# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
277# CONFIG_INET_XFRM_MODE_TUNNEL is not set
278CONFIG_INET_XFRM_MODE_BEET=y
279# CONFIG_INET_LRO is not set
280# CONFIG_INET_DIAG is not set
281# CONFIG_TCP_CONG_ADVANCED is not set
282CONFIG_TCP_CONG_CUBIC=y
283CONFIG_DEFAULT_TCP_CONG="cubic"
284# CONFIG_TCP_MD5SIG is not set
285CONFIG_IPV6=y
286# CONFIG_IPV6_PRIVACY is not set
287# CONFIG_IPV6_ROUTER_PREF is not set
288# CONFIG_IPV6_OPTIMISTIC_DAD is not set
289# CONFIG_INET6_AH is not set
290# CONFIG_INET6_ESP is not set
291# CONFIG_INET6_IPCOMP is not set
292# CONFIG_IPV6_MIP6 is not set
293# CONFIG_INET6_XFRM_TUNNEL is not set
294# CONFIG_INET6_TUNNEL is not set
295CONFIG_INET6_XFRM_MODE_TRANSPORT=y
296CONFIG_INET6_XFRM_MODE_TUNNEL=y
297CONFIG_INET6_XFRM_MODE_BEET=y
298# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
299CONFIG_IPV6_SIT=y
300# CONFIG_IPV6_SIT_6RD is not set
301CONFIG_IPV6_NDISC_NODETYPE=y
302# CONFIG_IPV6_TUNNEL is not set
303# CONFIG_IPV6_MULTIPLE_TABLES is not set
304# CONFIG_IPV6_MROUTE is not set
305# CONFIG_NETWORK_SECMARK is not set
306# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
307# CONFIG_NETFILTER is not set
308# CONFIG_IP_DCCP is not set
309# CONFIG_IP_SCTP is not set
310# CONFIG_RDS is not set
311# CONFIG_TIPC is not set
312# CONFIG_ATM is not set
313# CONFIG_L2TP is not set
314# CONFIG_BRIDGE is not set
315# CONFIG_NET_DSA is not set
316# CONFIG_VLAN_8021Q is not set
317# CONFIG_DECNET is not set
318# CONFIG_LLC2 is not set
319# CONFIG_IPX is not set
320# CONFIG_ATALK is not set
321# CONFIG_X25 is not set
322# CONFIG_LAPB is not set
323# CONFIG_ECONET is not set
324# CONFIG_WAN_ROUTER is not set
325# CONFIG_PHONET is not set
326# CONFIG_IEEE802154 is not set
327# CONFIG_NET_SCHED is not set
328# CONFIG_DCB is not set
329# CONFIG_BATMAN_ADV is not set
330CONFIG_RPS=y
331CONFIG_RFS_ACCEL=y
332CONFIG_XPS=y
333
334#
335# Network testing
336#
337# CONFIG_NET_PKTGEN is not set
338# CONFIG_HAMRADIO is not set
339# CONFIG_CAN is not set
340# CONFIG_IRDA is not set
341# CONFIG_BT is not set
342# CONFIG_AF_RXRPC is not set
343# CONFIG_WIRELESS is not set
344# CONFIG_WIMAX is not set
345# CONFIG_RFKILL is not set
346# CONFIG_NET_9P is not set
347# CONFIG_CAIF is not set
348# CONFIG_CEPH_LIB is not set
349
350#
351# Device Drivers
352#
353
354#
355# Generic Driver Options
356#
357CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
358# CONFIG_DEVTMPFS is not set
359CONFIG_STANDALONE=y
360CONFIG_PREVENT_FIRMWARE_BUILD=y
361CONFIG_FW_LOADER=y
362CONFIG_FIRMWARE_IN_KERNEL=y
363CONFIG_EXTRA_FIRMWARE=""
364# CONFIG_DEBUG_DRIVER is not set
365# CONFIG_DEBUG_DEVRES is not set
366# CONFIG_SYS_HYPERVISOR is not set
367# CONFIG_CONNECTOR is not set
368# CONFIG_MTD is not set
369# CONFIG_PARPORT is not set
370CONFIG_BLK_DEV=y
371# CONFIG_BLK_CPQ_DA is not set
372# CONFIG_BLK_CPQ_CISS_DA is not set
373# CONFIG_BLK_DEV_DAC960 is not set
374# CONFIG_BLK_DEV_UMEM is not set
375# CONFIG_BLK_DEV_COW_COMMON is not set
376# CONFIG_BLK_DEV_LOOP is not set
377
378#
379# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
380#
381# CONFIG_BLK_DEV_NBD is not set
382# CONFIG_BLK_DEV_SX8 is not set
383# CONFIG_BLK_DEV_RAM is not set
384# CONFIG_CDROM_PKTCDVD is not set
385# CONFIG_ATA_OVER_ETH is not set
386# CONFIG_BLK_DEV_RBD is not set
387# CONFIG_SENSORS_LIS3LV02D is not set
388CONFIG_MISC_DEVICES=y
389# CONFIG_PHANTOM is not set
390# CONFIG_SGI_IOC4 is not set
391# CONFIG_TIFM_CORE is not set
392# CONFIG_ENCLOSURE_SERVICES is not set
393# CONFIG_HP_ILO is not set
394# CONFIG_PCH_PHUB is not set
395# CONFIG_C2PORT is not set
396
397#
398# EEPROM support
399#
400# CONFIG_EEPROM_93CX6 is not set
401# CONFIG_CB710_CORE is not set
402
403#
404# Texas Instruments shared transport line discipline
405#
406
407#
408# SCSI device support
409#
410CONFIG_SCSI_MOD=y
411# CONFIG_RAID_ATTRS is not set
412CONFIG_SCSI=y
413CONFIG_SCSI_DMA=y
414# CONFIG_SCSI_TGT is not set
415# CONFIG_SCSI_NETLINK is not set
416CONFIG_SCSI_PROC_FS=y
417
418#
419# SCSI support type (disk, tape, CD-ROM)
420#
421CONFIG_BLK_DEV_SD=y
422# CONFIG_CHR_DEV_ST is not set
423# CONFIG_CHR_DEV_OSST is not set
424# CONFIG_BLK_DEV_SR is not set
425# CONFIG_CHR_DEV_SG is not set
426# CONFIG_CHR_DEV_SCH is not set
427# CONFIG_SCSI_MULTI_LUN is not set
428CONFIG_SCSI_CONSTANTS=y
429CONFIG_SCSI_LOGGING=y
430# CONFIG_SCSI_SCAN_ASYNC is not set
431CONFIG_SCSI_WAIT_SCAN=m
432
433#
434# SCSI Transports
435#
436# CONFIG_SCSI_SPI_ATTRS is not set
437# CONFIG_SCSI_FC_ATTRS is not set
438# CONFIG_SCSI_ISCSI_ATTRS is not set
439# CONFIG_SCSI_SAS_ATTRS is not set
440# CONFIG_SCSI_SAS_LIBSAS is not set
441# CONFIG_SCSI_SRP_ATTRS is not set
442CONFIG_SCSI_LOWLEVEL=y
443# CONFIG_ISCSI_TCP is not set
444# CONFIG_ISCSI_BOOT_SYSFS is not set
445# CONFIG_SCSI_CXGB3_ISCSI is not set
446# CONFIG_SCSI_CXGB4_ISCSI is not set
447# CONFIG_SCSI_BNX2_ISCSI is not set
448# CONFIG_SCSI_BNX2X_FCOE is not set
449# CONFIG_BE2ISCSI is not set
450# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
451# CONFIG_SCSI_HPSA is not set
452# CONFIG_SCSI_3W_9XXX is not set
453# CONFIG_SCSI_3W_SAS is not set
454# CONFIG_SCSI_ACARD is not set
455# CONFIG_SCSI_AACRAID is not set
456# CONFIG_SCSI_AIC7XXX is not set
457# CONFIG_SCSI_AIC7XXX_OLD is not set
458# CONFIG_SCSI_AIC79XX is not set
459# CONFIG_SCSI_AIC94XX is not set
460# CONFIG_SCSI_MVSAS is not set
461# CONFIG_SCSI_DPT_I2O is not set
462# CONFIG_SCSI_ADVANSYS is not set
463# CONFIG_SCSI_ARCMSR is not set
464# CONFIG_MEGARAID_NEWGEN is not set
465# CONFIG_MEGARAID_LEGACY is not set
466# CONFIG_MEGARAID_SAS is not set
467# CONFIG_SCSI_MPT2SAS is not set
468# CONFIG_SCSI_HPTIOP is not set
469# CONFIG_LIBFC is not set
470# CONFIG_LIBFCOE is not set
471# CONFIG_FCOE is not set
472# CONFIG_SCSI_DMX3191D is not set
473# CONFIG_SCSI_FUTURE_DOMAIN is not set
474# CONFIG_SCSI_IPS is not set
475# CONFIG_SCSI_INITIO is not set
476# CONFIG_SCSI_INIA100 is not set
477# CONFIG_SCSI_STEX is not set
478# CONFIG_SCSI_SYM53C8XX_2 is not set
479# CONFIG_SCSI_QLOGIC_1280 is not set
480# CONFIG_SCSI_QLA_FC is not set
481# CONFIG_SCSI_QLA_ISCSI is not set
482# CONFIG_SCSI_LPFC is not set
483# CONFIG_SCSI_DC395x is not set
484# CONFIG_SCSI_DC390T is not set
485# CONFIG_SCSI_NSP32 is not set
486# CONFIG_SCSI_DEBUG is not set
487# CONFIG_SCSI_PMCRAID is not set
488# CONFIG_SCSI_PM8001 is not set
489# CONFIG_SCSI_SRP is not set
490# CONFIG_SCSI_BFA_FC is not set
491# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
492# CONFIG_SCSI_DH is not set
493# CONFIG_SCSI_OSD_INITIATOR is not set
494# CONFIG_ATA is not set
495# CONFIG_MD is not set
496# CONFIG_TARGET_CORE is not set
497# CONFIG_FUSION is not set
498
499#
500# IEEE 1394 (FireWire) support
501#
502# CONFIG_FIREWIRE is not set
503# CONFIG_FIREWIRE_NOSY is not set
504# CONFIG_I2O is not set
505CONFIG_NETDEVICES=y
506# CONFIG_DUMMY is not set
507# CONFIG_BONDING is not set
508# CONFIG_MACVLAN is not set
509# CONFIG_EQUALIZER is not set
510CONFIG_TUN=y
511# CONFIG_VETH is not set
512# CONFIG_ARCNET is not set
513# CONFIG_MII is not set
514# CONFIG_PHYLIB is not set
515# CONFIG_NET_ETHERNET is not set
516CONFIG_NETDEV_1000=y
517# CONFIG_ACENIC is not set
518# CONFIG_DL2K is not set
519# CONFIG_E1000 is not set
520# CONFIG_E1000E is not set
521# CONFIG_IP1000 is not set
522# CONFIG_IGB is not set
523# CONFIG_IGBVF is not set
524# CONFIG_NS83820 is not set
525# CONFIG_HAMACHI is not set
526# CONFIG_YELLOWFIN is not set
527# CONFIG_R8169 is not set
528# CONFIG_SIS190 is not set
529# CONFIG_SKGE is not set
530# CONFIG_SKY2 is not set
531# CONFIG_VIA_VELOCITY is not set
532# CONFIG_TIGON3 is not set
533# CONFIG_BNX2 is not set
534# CONFIG_CNIC is not set
535# CONFIG_QLA3XXX is not set
536# CONFIG_ATL1 is not set
537# CONFIG_ATL1E is not set
538# CONFIG_ATL1C is not set
539# CONFIG_JME is not set
540# CONFIG_STMMAC_ETH is not set
541# CONFIG_PCH_GBE is not set
542# CONFIG_NETDEV_10000 is not set
543# CONFIG_TR is not set
544# CONFIG_WLAN is not set
545
546#
547# Enable WiMAX (Networking options) to see the WiMAX drivers
548#
549# CONFIG_WAN is not set
550
551#
552# CAIF transport drivers
553#
554CONFIG_TILE_NET=y
555# CONFIG_FDDI is not set
556# CONFIG_HIPPI is not set
557# CONFIG_PPP is not set
558# CONFIG_SLIP is not set
559# CONFIG_NET_FC is not set
560# CONFIG_NETCONSOLE is not set
561# CONFIG_NETPOLL is not set
562# CONFIG_NET_POLL_CONTROLLER is not set
563# CONFIG_VMXNET3 is not set
564# CONFIG_ISDN is not set
565# CONFIG_PHONE is not set
566
567#
568# Input device support
569#
570CONFIG_INPUT=y
571# CONFIG_INPUT_FF_MEMLESS is not set
572# CONFIG_INPUT_POLLDEV is not set
573# CONFIG_INPUT_SPARSEKMAP is not set
574
575#
576# Userland interfaces
577#
578# CONFIG_INPUT_MOUSEDEV is not set
579# CONFIG_INPUT_JOYDEV is not set
580# CONFIG_INPUT_EVDEV is not set
581# CONFIG_INPUT_EVBUG is not set
582
583#
584# Input Device Drivers
585#
586# CONFIG_INPUT_KEYBOARD is not set
587# CONFIG_INPUT_MOUSE is not set
588# CONFIG_INPUT_JOYSTICK is not set
589# CONFIG_INPUT_TABLET is not set
590# CONFIG_INPUT_TOUCHSCREEN is not set
591# CONFIG_INPUT_MISC is not set
592
593#
594# Hardware I/O ports
595#
596# CONFIG_SERIO is not set
597# CONFIG_GAMEPORT is not set
598
599#
600# Character devices
601#
602# CONFIG_VT is not set
603CONFIG_UNIX98_PTYS=y
604# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
605# CONFIG_LEGACY_PTYS is not set
606# CONFIG_SERIAL_NONSTANDARD is not set
607# CONFIG_NOZOMI is not set
608# CONFIG_N_GSM is not set
609CONFIG_DEVKMEM=y
610
611#
612# Serial drivers
613#
614# CONFIG_SERIAL_8250 is not set
615
616#
617# Non-8250 serial port support
618#
619# CONFIG_SERIAL_MFD_HSU is not set
620# CONFIG_SERIAL_JSM is not set
621# CONFIG_SERIAL_TIMBERDALE is not set
622# CONFIG_SERIAL_ALTERA_JTAGUART is not set
623# CONFIG_SERIAL_ALTERA_UART is not set
624# CONFIG_SERIAL_PCH_UART is not set
625# CONFIG_TTY_PRINTK is not set
626CONFIG_HVC_DRIVER=y
627# CONFIG_IPMI_HANDLER is not set
628# CONFIG_HW_RANDOM is not set
629# CONFIG_R3964 is not set
630# CONFIG_APPLICOM is not set
631
632#
633# PCMCIA character devices
634#
635# CONFIG_RAW_DRIVER is not set
636# CONFIG_TCG_TPM is not set
637CONFIG_DEVPORT=y
638# CONFIG_RAMOOPS is not set
639# CONFIG_I2C is not set
640# CONFIG_SPI is not set
641
642#
643# PPS support
644#
645# CONFIG_PPS is not set
646
647#
648# PPS generators support
649#
650# CONFIG_W1 is not set
651# CONFIG_POWER_SUPPLY is not set
652CONFIG_HWMON=y
653# CONFIG_HWMON_VID is not set
654# CONFIG_HWMON_DEBUG_CHIP is not set
655
656#
657# Native drivers
658#
659# CONFIG_SENSORS_I5K_AMB is not set
660# CONFIG_SENSORS_F71805F is not set
661# CONFIG_SENSORS_F71882FG is not set
662# CONFIG_SENSORS_IT87 is not set
663# CONFIG_SENSORS_PC87360 is not set
664# CONFIG_SENSORS_PC87427 is not set
665# CONFIG_SENSORS_SIS5595 is not set
666# CONFIG_SENSORS_SMSC47M1 is not set
667# CONFIG_SENSORS_SMSC47B397 is not set
668# CONFIG_SENSORS_SCH5627 is not set
669# CONFIG_SENSORS_VIA686A is not set
670# CONFIG_SENSORS_VT1211 is not set
671# CONFIG_SENSORS_VT8231 is not set
672# CONFIG_SENSORS_W83627HF is not set
673# CONFIG_SENSORS_W83627EHF is not set
674# CONFIG_THERMAL is not set
675CONFIG_WATCHDOG=y
676CONFIG_WATCHDOG_NOWAYOUT=y
677
678#
679# Watchdog Device Drivers
680#
681# CONFIG_SOFT_WATCHDOG is not set
682# CONFIG_ALIM7101_WDT is not set
683
684#
685# PCI-based Watchdog Cards
686#
687# CONFIG_PCIPCWATCHDOG is not set
688# CONFIG_WDTPCI is not set
689CONFIG_SSB_POSSIBLE=y
690
691#
692# Sonics Silicon Backplane
693#
694# CONFIG_SSB is not set
695CONFIG_MFD_SUPPORT=y
696# CONFIG_MFD_CORE is not set
697# CONFIG_MFD_SM501 is not set
698# CONFIG_HTC_PASIC3 is not set
699# CONFIG_MFD_TMIO is not set
700# CONFIG_ABX500_CORE is not set
701# CONFIG_LPC_SCH is not set
702# CONFIG_MFD_RDC321X is not set
703# CONFIG_MFD_JANZ_CMODIO is not set
704# CONFIG_MFD_VX855 is not set
705# CONFIG_REGULATOR is not set
706# CONFIG_MEDIA_SUPPORT is not set
707
708#
709# Graphics support
710#
711CONFIG_VGA_ARB=y
712CONFIG_VGA_ARB_MAX_GPUS=16
713# CONFIG_DRM is not set
714# CONFIG_STUB_POULSBO is not set
715# CONFIG_VGASTATE is not set
716# CONFIG_VIDEO_OUTPUT_CONTROL is not set
717# CONFIG_FB is not set
718# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
719
720#
721# Display device support
722#
723# CONFIG_DISPLAY_SUPPORT is not set
724# CONFIG_SOUND is not set
725# CONFIG_HID_SUPPORT is not set
726CONFIG_USB_SUPPORT=y
727CONFIG_USB_ARCH_HAS_HCD=y
728CONFIG_USB_ARCH_HAS_OHCI=y
729CONFIG_USB_ARCH_HAS_EHCI=y
730# CONFIG_USB is not set
731# CONFIG_USB_OTG_WHITELIST is not set
732# CONFIG_USB_OTG_BLACKLIST_HUB is not set
733
734#
735# Enable Host or Gadget support to see Inventra options
736#
737
738#
739# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
740#
741# CONFIG_USB_GADGET is not set
742
743#
744# OTG and related infrastructure
745#
746# CONFIG_UWB is not set
747# CONFIG_MMC is not set
748# CONFIG_MEMSTICK is not set
749# CONFIG_NEW_LEDS is not set
750# CONFIG_NFC_DEVICES is not set
751# CONFIG_ACCESSIBILITY is not set
752# CONFIG_INFINIBAND is not set
753CONFIG_EDAC=y
754
755#
756# Reporting subsystems
757#
758# CONFIG_EDAC_DEBUG is not set
759CONFIG_EDAC_MM_EDAC=y
760CONFIG_EDAC_TILE=y
761CONFIG_RTC_LIB=y
762CONFIG_RTC_CLASS=y
763CONFIG_RTC_HCTOSYS=y
764CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
765# CONFIG_RTC_DEBUG is not set
766
767#
768# RTC interfaces
769#
770# CONFIG_RTC_INTF_SYSFS is not set
771# CONFIG_RTC_INTF_PROC is not set
772CONFIG_RTC_INTF_DEV=y
773# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
774# CONFIG_RTC_DRV_TEST is not set
775
776#
777# SPI RTC drivers
778#
779
780#
781# Platform RTC drivers
782#
783# CONFIG_RTC_DRV_DS1286 is not set
784# CONFIG_RTC_DRV_DS1511 is not set
785# CONFIG_RTC_DRV_DS1553 is not set
786# CONFIG_RTC_DRV_DS1742 is not set
787# CONFIG_RTC_DRV_STK17TA8 is not set
788# CONFIG_RTC_DRV_M48T86 is not set
789# CONFIG_RTC_DRV_M48T35 is not set
790# CONFIG_RTC_DRV_M48T59 is not set
791# CONFIG_RTC_DRV_MSM6242 is not set
792# CONFIG_RTC_DRV_BQ4802 is not set
793# CONFIG_RTC_DRV_RP5C01 is not set
794# CONFIG_RTC_DRV_V3020 is not set
795
796#
797# on-CPU RTC drivers
798#
799CONFIG_RTC_DRV_TILE=y
800# CONFIG_DMADEVICES is not set
801# CONFIG_AUXDISPLAY is not set
802# CONFIG_UIO is not set
803# CONFIG_STAGING is not set
804
805#
806# File systems
807#
808CONFIG_EXT2_FS=y
809# CONFIG_EXT2_FS_XATTR is not set
810# CONFIG_EXT2_FS_XIP is not set
811CONFIG_EXT3_FS=y
812# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
813CONFIG_EXT3_FS_XATTR=y
814# CONFIG_EXT3_FS_POSIX_ACL is not set
815# CONFIG_EXT3_FS_SECURITY is not set
816# CONFIG_EXT4_FS is not set
817CONFIG_JBD=y
818CONFIG_FS_MBCACHE=y
819# CONFIG_REISERFS_FS is not set
820# CONFIG_JFS_FS is not set
821# CONFIG_XFS_FS is not set
822# CONFIG_GFS2_FS is not set
823# CONFIG_BTRFS_FS is not set
824# CONFIG_NILFS2_FS is not set
825# CONFIG_FS_POSIX_ACL is not set
826CONFIG_EXPORTFS=y
827CONFIG_FILE_LOCKING=y
828CONFIG_FSNOTIFY=y
829CONFIG_DNOTIFY=y
830CONFIG_INOTIFY_USER=y
831# CONFIG_FANOTIFY is not set
832# CONFIG_QUOTA is not set
833# CONFIG_QUOTACTL is not set
834# CONFIG_AUTOFS4_FS is not set
835CONFIG_FUSE_FS=y
836# CONFIG_CUSE is not set
837
838#
839# Caches
840#
841# CONFIG_FSCACHE is not set
842
843#
844# CD-ROM/DVD Filesystems
845#
846# CONFIG_ISO9660_FS is not set
847# CONFIG_UDF_FS is not set
848
849#
850# DOS/FAT/NT Filesystems
851#
852CONFIG_FAT_FS=y
853CONFIG_MSDOS_FS=y
854CONFIG_VFAT_FS=m
855CONFIG_FAT_DEFAULT_CODEPAGE=437
856CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
857# CONFIG_NTFS_FS is not set
858
859#
860# Pseudo filesystems
861#
862CONFIG_PROC_FS=y
863# CONFIG_PROC_KCORE is not set
864CONFIG_PROC_SYSCTL=y
865CONFIG_PROC_PAGE_MONITOR=y
866CONFIG_SYSFS=y
867CONFIG_TMPFS=y
868# CONFIG_TMPFS_POSIX_ACL is not set
869CONFIG_HUGETLBFS=y
870CONFIG_HUGETLB_PAGE=y
871# CONFIG_CONFIGFS_FS is not set
872CONFIG_MISC_FILESYSTEMS=y
873# CONFIG_ADFS_FS is not set
874# CONFIG_AFFS_FS is not set
875# CONFIG_HFS_FS is not set
876# CONFIG_HFSPLUS_FS is not set
877# CONFIG_BEFS_FS is not set
878# CONFIG_BFS_FS is not set
879# CONFIG_EFS_FS is not set
880# CONFIG_LOGFS is not set
881# CONFIG_CRAMFS is not set
882# CONFIG_SQUASHFS is not set
883# CONFIG_VXFS_FS is not set
884# CONFIG_MINIX_FS is not set
885# CONFIG_OMFS_FS is not set
886# CONFIG_HPFS_FS is not set
887# CONFIG_QNX4FS_FS is not set
888# CONFIG_ROMFS_FS is not set
889# CONFIG_PSTORE is not set
890# CONFIG_SYSV_FS is not set
891# CONFIG_UFS_FS is not set
892CONFIG_NETWORK_FILESYSTEMS=y
893CONFIG_NFS_FS=m
894CONFIG_NFS_V3=y
895# CONFIG_NFS_V3_ACL is not set
896# CONFIG_NFS_V4 is not set
897# CONFIG_NFSD is not set
898CONFIG_LOCKD=m
899CONFIG_LOCKD_V4=y
900CONFIG_NFS_COMMON=y
901CONFIG_SUNRPC=m
902# CONFIG_RPCSEC_GSS_KRB5 is not set
903# CONFIG_CEPH_FS is not set
904# CONFIG_CIFS is not set
905# CONFIG_NCP_FS is not set
906# CONFIG_CODA_FS is not set
907# CONFIG_AFS_FS is not set
908
909#
910# Partition Types
911#
912# CONFIG_PARTITION_ADVANCED is not set
913CONFIG_MSDOS_PARTITION=y
914CONFIG_NLS=y
915CONFIG_NLS_DEFAULT="iso8859-1"
916CONFIG_NLS_CODEPAGE_437=y
917# CONFIG_NLS_CODEPAGE_737 is not set
918# CONFIG_NLS_CODEPAGE_775 is not set
919# CONFIG_NLS_CODEPAGE_850 is not set
920# CONFIG_NLS_CODEPAGE_852 is not set
921# CONFIG_NLS_CODEPAGE_855 is not set
922# CONFIG_NLS_CODEPAGE_857 is not set
923# CONFIG_NLS_CODEPAGE_860 is not set
924# CONFIG_NLS_CODEPAGE_861 is not set
925# CONFIG_NLS_CODEPAGE_862 is not set
926# CONFIG_NLS_CODEPAGE_863 is not set
927# CONFIG_NLS_CODEPAGE_864 is not set
928# CONFIG_NLS_CODEPAGE_865 is not set
929# CONFIG_NLS_CODEPAGE_866 is not set
930# CONFIG_NLS_CODEPAGE_869 is not set
931# CONFIG_NLS_CODEPAGE_936 is not set
932# CONFIG_NLS_CODEPAGE_950 is not set
933# CONFIG_NLS_CODEPAGE_932 is not set
934# CONFIG_NLS_CODEPAGE_949 is not set
935# CONFIG_NLS_CODEPAGE_874 is not set
936# CONFIG_NLS_ISO8859_8 is not set
937# CONFIG_NLS_CODEPAGE_1250 is not set
938# CONFIG_NLS_CODEPAGE_1251 is not set
939# CONFIG_NLS_ASCII is not set
940CONFIG_NLS_ISO8859_1=y
941# CONFIG_NLS_ISO8859_2 is not set
942# CONFIG_NLS_ISO8859_3 is not set
943# CONFIG_NLS_ISO8859_4 is not set
944# CONFIG_NLS_ISO8859_5 is not set
945# CONFIG_NLS_ISO8859_6 is not set
946# CONFIG_NLS_ISO8859_7 is not set
947# CONFIG_NLS_ISO8859_9 is not set
948# CONFIG_NLS_ISO8859_13 is not set
949# CONFIG_NLS_ISO8859_14 is not set
950# CONFIG_NLS_ISO8859_15 is not set
951# CONFIG_NLS_KOI8_R is not set
952# CONFIG_NLS_KOI8_U is not set
953# CONFIG_NLS_UTF8 is not set
954
955#
956# Kernel hacking
957#
958# CONFIG_PRINTK_TIME is not set
959CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
960CONFIG_ENABLE_WARN_DEPRECATED=y
961CONFIG_ENABLE_MUST_CHECK=y
962CONFIG_FRAME_WARN=2048
963CONFIG_MAGIC_SYSRQ=y
964# CONFIG_STRIP_ASM_SYMS is not set
965# CONFIG_UNUSED_SYMBOLS is not set
966# CONFIG_DEBUG_FS is not set
967# CONFIG_HEADERS_CHECK is not set
968# CONFIG_DEBUG_SECTION_MISMATCH is not set
969CONFIG_DEBUG_KERNEL=y
970# CONFIG_DEBUG_SHIRQ is not set
971# CONFIG_LOCKUP_DETECTOR is not set
972# CONFIG_HARDLOCKUP_DETECTOR is not set
973CONFIG_DETECT_HUNG_TASK=y
974# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
975CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
976CONFIG_SCHED_DEBUG=y
977# CONFIG_SCHEDSTATS is not set
978# CONFIG_TIMER_STATS is not set
979# CONFIG_DEBUG_OBJECTS is not set
980# CONFIG_SLUB_DEBUG_ON is not set
981# CONFIG_SLUB_STATS is not set
982# CONFIG_DEBUG_KMEMLEAK is not set
983# CONFIG_DEBUG_RT_MUTEXES is not set
984# CONFIG_RT_MUTEX_TESTER is not set
985# CONFIG_DEBUG_SPINLOCK is not set
986# CONFIG_DEBUG_MUTEXES is not set
987# CONFIG_DEBUG_LOCK_ALLOC is not set
988# CONFIG_PROVE_LOCKING is not set
989# CONFIG_SPARSE_RCU_POINTER is not set
990# CONFIG_LOCK_STAT is not set
991CONFIG_DEBUG_SPINLOCK_SLEEP=y
992# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
993CONFIG_STACKTRACE=y
994# CONFIG_DEBUG_KOBJECT is not set
995# CONFIG_DEBUG_HIGHMEM is not set
996CONFIG_DEBUG_INFO=y
997# CONFIG_DEBUG_INFO_REDUCED is not set
998CONFIG_DEBUG_VM=y
999# CONFIG_DEBUG_WRITECOUNT is not set
1000# CONFIG_DEBUG_MEMORY_INIT is not set
1001# CONFIG_DEBUG_LIST is not set
1002# CONFIG_TEST_LIST_SORT is not set
1003# CONFIG_DEBUG_SG is not set
1004# CONFIG_DEBUG_NOTIFIERS is not set
1005# CONFIG_DEBUG_CREDENTIALS is not set
1006# CONFIG_RCU_TORTURE_TEST is not set
1007# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1008# CONFIG_BACKTRACE_SELF_TEST is not set
1009# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1010# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
1011# CONFIG_FAULT_INJECTION is not set
1012# CONFIG_SYSCTL_SYSCALL_CHECK is not set
1013# CONFIG_DEBUG_PAGEALLOC is not set
1014CONFIG_TRACING_SUPPORT=y
1015CONFIG_FTRACE=y
1016# CONFIG_IRQSOFF_TRACER is not set
1017# CONFIG_SCHED_TRACER is not set
1018# CONFIG_ENABLE_DEFAULT_TRACERS is not set
1019CONFIG_BRANCH_PROFILE_NONE=y
1020# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1021# CONFIG_PROFILE_ALL_BRANCHES is not set
1022# CONFIG_BLK_DEV_IO_TRACE is not set
1023# CONFIG_ATOMIC64_SELFTEST is not set
1024# CONFIG_SAMPLES is not set
1025# CONFIG_TEST_KSTRTOX is not set
1026CONFIG_EARLY_PRINTK=y
1027CONFIG_DEBUG_STACKOVERFLOW=y
1028# CONFIG_DEBUG_STACK_USAGE is not set
1029CONFIG_DEBUG_EXTRA_FLAGS="-femit-struct-debug-baseonly"
1030
1031#
1032# Security options
1033#
1034# CONFIG_KEYS is not set
1035# CONFIG_SECURITY_DMESG_RESTRICT is not set
1036# CONFIG_SECURITY is not set
1037# CONFIG_SECURITYFS is not set
1038CONFIG_DEFAULT_SECURITY_DAC=y
1039CONFIG_DEFAULT_SECURITY=""
1040CONFIG_CRYPTO=y
1041
1042#
1043# Crypto core or helper
1044#
1045# CONFIG_CRYPTO_FIPS is not set
1046CONFIG_CRYPTO_ALGAPI=m
1047CONFIG_CRYPTO_ALGAPI2=m
1048CONFIG_CRYPTO_RNG=m
1049CONFIG_CRYPTO_RNG2=m
1050# CONFIG_CRYPTO_MANAGER is not set
1051# CONFIG_CRYPTO_MANAGER2 is not set
1052# CONFIG_CRYPTO_GF128MUL is not set
1053# CONFIG_CRYPTO_NULL is not set
1054# CONFIG_CRYPTO_PCRYPT is not set
1055# CONFIG_CRYPTO_CRYPTD is not set
1056# CONFIG_CRYPTO_AUTHENC is not set
1057# CONFIG_CRYPTO_TEST is not set
1058
1059#
1060# Authenticated Encryption with Associated Data
1061#
1062# CONFIG_CRYPTO_CCM is not set
1063# CONFIG_CRYPTO_GCM is not set
1064# CONFIG_CRYPTO_SEQIV is not set
1065
1066#
1067# Block modes
1068#
1069# CONFIG_CRYPTO_CBC is not set
1070# CONFIG_CRYPTO_CTR is not set
1071# CONFIG_CRYPTO_CTS is not set
1072# CONFIG_CRYPTO_ECB is not set
1073# CONFIG_CRYPTO_LRW is not set
1074# CONFIG_CRYPTO_PCBC is not set
1075# CONFIG_CRYPTO_XTS is not set
1076
1077#
1078# Hash modes
1079#
1080# CONFIG_CRYPTO_HMAC is not set
1081# CONFIG_CRYPTO_XCBC is not set
1082# CONFIG_CRYPTO_VMAC is not set
1083
1084#
1085# Digest
1086#
1087# CONFIG_CRYPTO_CRC32C is not set
1088# CONFIG_CRYPTO_GHASH is not set
1089# CONFIG_CRYPTO_MD4 is not set
1090# CONFIG_CRYPTO_MD5 is not set
1091# CONFIG_CRYPTO_MICHAEL_MIC is not set
1092# CONFIG_CRYPTO_RMD128 is not set
1093# CONFIG_CRYPTO_RMD160 is not set
1094# CONFIG_CRYPTO_RMD256 is not set
1095# CONFIG_CRYPTO_RMD320 is not set
1096# CONFIG_CRYPTO_SHA1 is not set
1097# CONFIG_CRYPTO_SHA256 is not set
1098# CONFIG_CRYPTO_SHA512 is not set
1099# CONFIG_CRYPTO_TGR192 is not set
1100# CONFIG_CRYPTO_WP512 is not set
1101
1102#
1103# Ciphers
1104#
1105CONFIG_CRYPTO_AES=m
1106# CONFIG_CRYPTO_ANUBIS is not set
1107# CONFIG_CRYPTO_ARC4 is not set
1108# CONFIG_CRYPTO_BLOWFISH is not set
1109# CONFIG_CRYPTO_CAMELLIA is not set
1110# CONFIG_CRYPTO_CAST5 is not set
1111# CONFIG_CRYPTO_CAST6 is not set
1112# CONFIG_CRYPTO_DES is not set
1113# CONFIG_CRYPTO_FCRYPT is not set
1114# CONFIG_CRYPTO_KHAZAD is not set
1115# CONFIG_CRYPTO_SALSA20 is not set
1116# CONFIG_CRYPTO_SEED is not set
1117# CONFIG_CRYPTO_SERPENT is not set
1118# CONFIG_CRYPTO_TEA is not set
1119# CONFIG_CRYPTO_TWOFISH is not set
1120
1121#
1122# Compression
1123#
1124# CONFIG_CRYPTO_DEFLATE is not set
1125# CONFIG_CRYPTO_ZLIB is not set
1126# CONFIG_CRYPTO_LZO is not set
1127
1128#
1129# Random Number Generation
1130#
1131CONFIG_CRYPTO_ANSI_CPRNG=m
1132# CONFIG_CRYPTO_USER_API_HASH is not set
1133# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
1134CONFIG_CRYPTO_HW=y
1135# CONFIG_CRYPTO_DEV_HIFN_795X is not set
1136# CONFIG_BINARY_PRINTF is not set
1137
1138#
1139# Library routines
1140#
1141CONFIG_BITREVERSE=y
1142CONFIG_GENERIC_FIND_FIRST_BIT=y
1143CONFIG_GENERIC_FIND_NEXT_BIT=y
1144CONFIG_GENERIC_FIND_LAST_BIT=y
1145# CONFIG_CRC_CCITT is not set
1146# CONFIG_CRC16 is not set
1147# CONFIG_CRC_T10DIF is not set
1148# CONFIG_CRC_ITU_T is not set
1149CONFIG_CRC32=y
1150# CONFIG_CRC7 is not set
1151# CONFIG_LIBCRC32C is not set
1152CONFIG_ZLIB_INFLATE=y
1153# CONFIG_XZ_DEC is not set
1154# CONFIG_XZ_DEC_BCJ is not set
1155CONFIG_DECOMPRESS_GZIP=y
1156CONFIG_HAS_IOMEM=y
1157CONFIG_HAS_IOPORT=y
1158CONFIG_HAS_DMA=y
1159CONFIG_CPU_RMAP=y
1160CONFIG_NLATTR=y
1161# CONFIG_AVERAGE is not set
1162CONFIG_HAVE_KVM=y
1163# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/tile/include/arch/chip_tilegx.h b/arch/tile/include/arch/chip_tilegx.h
new file mode 100644
index 000000000000..ea8e4f2c9483
--- /dev/null
+++ b/arch/tile/include/arch/chip_tilegx.h
@@ -0,0 +1,258 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15/*
16 * @file
17 * Global header file.
18 * This header file specifies defines for TILE-Gx.
19 */
20
21#ifndef __ARCH_CHIP_H__
22#define __ARCH_CHIP_H__
23
24/** Specify chip version.
25 * When possible, prefer the CHIP_xxx symbols below for future-proofing.
26 * This is intended for cross-compiling; native compilation should
27 * use the predefined __tile_chip__ symbol.
28 */
29#define TILE_CHIP 10
30
31/** Specify chip revision.
32 * This provides for the case of a respin of a particular chip type;
33 * the normal value for this symbol is "0".
34 * This is intended for cross-compiling; native compilation should
35 * use the predefined __tile_chip_rev__ symbol.
36 */
37#define TILE_CHIP_REV 0
38
39/** The name of this architecture. */
40#define CHIP_ARCH_NAME "tilegx"
41
42/** The ELF e_machine type for binaries for this chip. */
43#define CHIP_ELF_TYPE() EM_TILEGX
44
45/** The alternate ELF e_machine type for binaries for this chip. */
46#define CHIP_COMPAT_ELF_TYPE() 0x2597
47
48/** What is the native word size of the machine? */
49#define CHIP_WORD_SIZE() 64
50
51/** How many bits of a virtual address are used. Extra bits must be
52 * the sign extension of the low bits.
53 */
54#define CHIP_VA_WIDTH() 42
55
56/** How many bits are in a physical address? */
57#define CHIP_PA_WIDTH() 40
58
59/** Size of the L2 cache, in bytes. */
60#define CHIP_L2_CACHE_SIZE() 262144
61
62/** Log size of an L2 cache line in bytes. */
63#define CHIP_L2_LOG_LINE_SIZE() 6
64
65/** Size of an L2 cache line, in bytes. */
66#define CHIP_L2_LINE_SIZE() (1 << CHIP_L2_LOG_LINE_SIZE())
67
68/** Associativity of the L2 cache. */
69#define CHIP_L2_ASSOC() 8
70
71/** Size of the L1 data cache, in bytes. */
72#define CHIP_L1D_CACHE_SIZE() 32768
73
74/** Log size of an L1 data cache line in bytes. */
75#define CHIP_L1D_LOG_LINE_SIZE() 6
76
77/** Size of an L1 data cache line, in bytes. */
78#define CHIP_L1D_LINE_SIZE() (1 << CHIP_L1D_LOG_LINE_SIZE())
79
80/** Associativity of the L1 data cache. */
81#define CHIP_L1D_ASSOC() 2
82
83/** Size of the L1 instruction cache, in bytes. */
84#define CHIP_L1I_CACHE_SIZE() 32768
85
86/** Log size of an L1 instruction cache line in bytes. */
87#define CHIP_L1I_LOG_LINE_SIZE() 6
88
89/** Size of an L1 instruction cache line, in bytes. */
90#define CHIP_L1I_LINE_SIZE() (1 << CHIP_L1I_LOG_LINE_SIZE())
91
92/** Associativity of the L1 instruction cache. */
93#define CHIP_L1I_ASSOC() 2
94
95/** Stride with which flush instructions must be issued. */
96#define CHIP_FLUSH_STRIDE() CHIP_L2_LINE_SIZE()
97
98/** Stride with which inv instructions must be issued. */
99#define CHIP_INV_STRIDE() CHIP_L2_LINE_SIZE()
100
101/** Stride with which finv instructions must be issued. */
102#define CHIP_FINV_STRIDE() CHIP_L2_LINE_SIZE()
103
104/** Can the local cache coherently cache data that is homed elsewhere? */
105#define CHIP_HAS_COHERENT_LOCAL_CACHE() 1
106
107/** How many simultaneous outstanding victims can the L2 cache have? */
108#define CHIP_MAX_OUTSTANDING_VICTIMS() 128
109
110/** Does the TLB support the NC and NOALLOC bits? */
111#define CHIP_HAS_NC_AND_NOALLOC_BITS() 1
112
113/** Does the chip support hash-for-home caching? */
114#define CHIP_HAS_CBOX_HOME_MAP() 1
115
116/** Number of entries in the chip's home map tables. */
117#define CHIP_CBOX_HOME_MAP_SIZE() 128
118
119/** Do uncacheable requests miss in the cache regardless of whether
120 * there is matching data? */
121#define CHIP_HAS_ENFORCED_UNCACHEABLE_REQUESTS() 1
122
123/** Does the mf instruction wait for victims? */
124#define CHIP_HAS_MF_WAITS_FOR_VICTIMS() 0
125
126/** Does the chip have an "inv" instruction that doesn't also flush? */
127#define CHIP_HAS_INV() 1
128
129/** Does the chip have a "wh64" instruction? */
130#define CHIP_HAS_WH64() 1
131
132/** Does this chip have a 'dword_align' instruction? */
133#define CHIP_HAS_DWORD_ALIGN() 0
134
135/** Number of performance counters. */
136#define CHIP_PERFORMANCE_COUNTERS() 4
137
138/** Does this chip have auxiliary performance counters? */
139#define CHIP_HAS_AUX_PERF_COUNTERS() 1
140
141/** Is the CBOX_MSR1 SPR supported? */
142#define CHIP_HAS_CBOX_MSR1() 0
143
144/** Is the TILE_RTF_HWM SPR supported? */
145#define CHIP_HAS_TILE_RTF_HWM() 1
146
147/** Is the TILE_WRITE_PENDING SPR supported? */
148#define CHIP_HAS_TILE_WRITE_PENDING() 0
149
150/** Is the PROC_STATUS SPR supported? */
151#define CHIP_HAS_PROC_STATUS_SPR() 1
152
153/** Is the DSTREAM_PF SPR supported? */
154#define CHIP_HAS_DSTREAM_PF() 1
155
156/** Log of the number of mshims we have. */
157#define CHIP_LOG_NUM_MSHIMS() 2
158
159/** Are the bases of the interrupt vector areas fixed? */
160#define CHIP_HAS_FIXED_INTVEC_BASE() 0
161
162/** Are the interrupt masks split up into 2 SPRs? */
163#define CHIP_HAS_SPLIT_INTR_MASK() 0
164
165/** Is the cycle count split up into 2 SPRs? */
166#define CHIP_HAS_SPLIT_CYCLE() 0
167
168/** Does the chip have a static network? */
169#define CHIP_HAS_SN() 0
170
171/** Does the chip have a static network processor? */
172#define CHIP_HAS_SN_PROC() 0
173
174/** Size of the L1 static network processor instruction cache, in bytes. */
175/* #define CHIP_L1SNI_CACHE_SIZE() -- does not apply to chip 10 */
176
177/** Does the chip have DMA support in each tile? */
178#define CHIP_HAS_TILE_DMA() 0
179
180/** Does the chip have the second revision of the directly accessible
181 * dynamic networks? This encapsulates a number of characteristics,
182 * including the absence of the catch-all, the absence of inline message
183 * tags, the absence of support for network context-switching, and so on.
184 */
185#define CHIP_HAS_REV1_XDN() 1
186
187/** Does the chip have cmpexch and similar (fetchadd, exch, etc.)? */
188#define CHIP_HAS_CMPEXCH() 1
189
190/** Does the chip have memory-mapped I/O support? */
191#define CHIP_HAS_MMIO() 1
192
193/** Does the chip have post-completion interrupts? */
194#define CHIP_HAS_POST_COMPLETION_INTERRUPTS() 1
195
196/** Does the chip have native single step support? */
197#define CHIP_HAS_SINGLE_STEP() 1
198
199#ifndef __OPEN_SOURCE__ /* features only relevant to hypervisor-level code */
200
201/** How many entries are present in the instruction TLB? */
202#define CHIP_ITLB_ENTRIES() 16
203
204/** How many entries are present in the data TLB? */
205#define CHIP_DTLB_ENTRIES() 32
206
207/** How many MAF entries does the XAUI shim have? */
208#define CHIP_XAUI_MAF_ENTRIES() 32
209
210/** Does the memory shim have a source-id table? */
211#define CHIP_HAS_MSHIM_SRCID_TABLE() 0
212
213/** Does the L1 instruction cache clear on reset? */
214#define CHIP_HAS_L1I_CLEAR_ON_RESET() 1
215
216/** Does the chip come out of reset with valid coordinates on all tiles?
217 * Note that if defined, this also implies that the upper left is 1,1.
218 */
219#define CHIP_HAS_VALID_TILE_COORD_RESET() 1
220
221/** Does the chip have unified packet formats? */
222#define CHIP_HAS_UNIFIED_PACKET_FORMATS() 1
223
224/** Does the chip support write reordering? */
225#define CHIP_HAS_WRITE_REORDERING() 1
226
227/** Does the chip support Y-X routing as well as X-Y? */
228#define CHIP_HAS_Y_X_ROUTING() 1
229
230/** Is INTCTRL_3 managed with the correct MPL? */
231#define CHIP_HAS_INTCTRL_3_STATUS_FIX() 1
232
233/** Is it possible to configure the chip to be big-endian? */
234#define CHIP_HAS_BIG_ENDIAN_CONFIG() 1
235
236/** Is the CACHE_RED_WAY_OVERRIDDEN SPR supported? */
237#define CHIP_HAS_CACHE_RED_WAY_OVERRIDDEN() 0
238
239/** Is the DIAG_TRACE_WAY SPR supported? */
240#define CHIP_HAS_DIAG_TRACE_WAY() 0
241
242/** Is the MEM_STRIPE_CONFIG SPR supported? */
243#define CHIP_HAS_MEM_STRIPE_CONFIG() 1
244
245/** Are the TLB_PERF SPRs supported? */
246#define CHIP_HAS_TLB_PERF() 1
247
248/** Is the VDN_SNOOP_SHIM_CTL SPR supported? */
249#define CHIP_HAS_VDN_SNOOP_SHIM_CTL() 0
250
251/** Does the chip support rev1 DMA packets? */
252#define CHIP_HAS_REV1_DMA_PACKETS() 1
253
254/** Does the chip have an IPI shim? */
255#define CHIP_HAS_IPI() 1
256
257#endif /* !__OPEN_SOURCE__ */
258#endif /* __ARCH_CHIP_H__ */
diff --git a/arch/tile/include/arch/icache.h b/arch/tile/include/arch/icache.h
index 5c87c9016338..762eafa8a11e 100644
--- a/arch/tile/include/arch/icache.h
+++ b/arch/tile/include/arch/icache.h
@@ -16,7 +16,7 @@
16/** 16/**
17 * @file 17 * @file
18 * 18 *
19 * Support for invalidating bytes in the instruction 19 * Support for invalidating bytes in the instruction cache.
20 */ 20 */
21 21
22#ifndef __ARCH_ICACHE_H__ 22#ifndef __ARCH_ICACHE_H__
@@ -30,11 +30,10 @@
30 * 30 *
31 * @param addr The start of memory to be invalidated. 31 * @param addr The start of memory to be invalidated.
32 * @param size The number of bytes to be invalidated. 32 * @param size The number of bytes to be invalidated.
33 * @param page_size The system's page size, typically the PAGE_SIZE constant 33 * @param page_size The system's page size, e.g. getpagesize() in userspace.
34 * in sys/page.h. This value must be a power of two no larger 34 * This value must be a power of two no larger than the page containing
35 * than the page containing the code to be invalidated. If the value 35 * the code to be invalidated. If the value is smaller than the actual page
36 * is smaller than the actual page size, this function will still 36 * size, this function will still work, but may run slower than necessary.
37 * work, but may run slower than necessary.
38 */ 37 */
39static __inline void 38static __inline void
40invalidate_icache(const void* addr, unsigned long size, 39invalidate_icache(const void* addr, unsigned long size,
diff --git a/arch/tile/include/arch/interrupts_64.h b/arch/tile/include/arch/interrupts_64.h
new file mode 100644
index 000000000000..5bb58b2e4e6f
--- /dev/null
+++ b/arch/tile/include/arch/interrupts_64.h
@@ -0,0 +1,276 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#ifndef __ARCH_INTERRUPTS_H__
16#define __ARCH_INTERRUPTS_H__
17
18/** Mask for an interrupt. */
19#ifdef __ASSEMBLER__
20/* Note: must handle breaking interrupts into high and low words manually. */
21#define INT_MASK(intno) (1 << (intno))
22#else
23#define INT_MASK(intno) (1ULL << (intno))
24#endif
25
26
27/** Where a given interrupt executes */
28#define INTERRUPT_VECTOR(i, pl) (0xFC000000 + ((pl) << 24) + ((i) << 8))
29
30/** Where to store a vector for a given interrupt. */
31#define USER_INTERRUPT_VECTOR(i) INTERRUPT_VECTOR(i, 0)
32
33/** The base address of user-level interrupts. */
34#define USER_INTERRUPT_VECTOR_BASE INTERRUPT_VECTOR(0, 0)
35
36
37/** Additional synthetic interrupt. */
38#define INT_BREAKPOINT (63)
39
40#define INT_MEM_ERROR 0
41#define INT_SINGLE_STEP_3 1
42#define INT_SINGLE_STEP_2 2
43#define INT_SINGLE_STEP_1 3
44#define INT_SINGLE_STEP_0 4
45#define INT_IDN_COMPLETE 5
46#define INT_UDN_COMPLETE 6
47#define INT_ITLB_MISS 7
48#define INT_ILL 8
49#define INT_GPV 9
50#define INT_IDN_ACCESS 10
51#define INT_UDN_ACCESS 11
52#define INT_SWINT_3 12
53#define INT_SWINT_2 13
54#define INT_SWINT_1 14
55#define INT_SWINT_0 15
56#define INT_ILL_TRANS 16
57#define INT_UNALIGN_DATA 17
58#define INT_DTLB_MISS 18
59#define INT_DTLB_ACCESS 19
60#define INT_IDN_FIREWALL 20
61#define INT_UDN_FIREWALL 21
62#define INT_TILE_TIMER 22
63#define INT_AUX_TILE_TIMER 23
64#define INT_IDN_TIMER 24
65#define INT_UDN_TIMER 25
66#define INT_IDN_AVAIL 26
67#define INT_UDN_AVAIL 27
68#define INT_IPI_3 28
69#define INT_IPI_2 29
70#define INT_IPI_1 30
71#define INT_IPI_0 31
72#define INT_PERF_COUNT 32
73#define INT_AUX_PERF_COUNT 33
74#define INT_INTCTRL_3 34
75#define INT_INTCTRL_2 35
76#define INT_INTCTRL_1 36
77#define INT_INTCTRL_0 37
78#define INT_BOOT_ACCESS 38
79#define INT_WORLD_ACCESS 39
80#define INT_I_ASID 40
81#define INT_D_ASID 41
82#define INT_DOUBLE_FAULT 42
83
84#define NUM_INTERRUPTS 43
85
86#ifndef __ASSEMBLER__
87#define QUEUED_INTERRUPTS ( \
88 INT_MASK(INT_MEM_ERROR) | \
89 INT_MASK(INT_IDN_COMPLETE) | \
90 INT_MASK(INT_UDN_COMPLETE) | \
91 INT_MASK(INT_IDN_FIREWALL) | \
92 INT_MASK(INT_UDN_FIREWALL) | \
93 INT_MASK(INT_TILE_TIMER) | \
94 INT_MASK(INT_AUX_TILE_TIMER) | \
95 INT_MASK(INT_IDN_TIMER) | \
96 INT_MASK(INT_UDN_TIMER) | \
97 INT_MASK(INT_IDN_AVAIL) | \
98 INT_MASK(INT_UDN_AVAIL) | \
99 INT_MASK(INT_IPI_3) | \
100 INT_MASK(INT_IPI_2) | \
101 INT_MASK(INT_IPI_1) | \
102 INT_MASK(INT_IPI_0) | \
103 INT_MASK(INT_PERF_COUNT) | \
104 INT_MASK(INT_AUX_PERF_COUNT) | \
105 INT_MASK(INT_INTCTRL_3) | \
106 INT_MASK(INT_INTCTRL_2) | \
107 INT_MASK(INT_INTCTRL_1) | \
108 INT_MASK(INT_INTCTRL_0) | \
109 INT_MASK(INT_BOOT_ACCESS) | \
110 INT_MASK(INT_WORLD_ACCESS) | \
111 INT_MASK(INT_I_ASID) | \
112 INT_MASK(INT_D_ASID) | \
113 INT_MASK(INT_DOUBLE_FAULT) | \
114 0)
115#define NONQUEUED_INTERRUPTS ( \
116 INT_MASK(INT_SINGLE_STEP_3) | \
117 INT_MASK(INT_SINGLE_STEP_2) | \
118 INT_MASK(INT_SINGLE_STEP_1) | \
119 INT_MASK(INT_SINGLE_STEP_0) | \
120 INT_MASK(INT_ITLB_MISS) | \
121 INT_MASK(INT_ILL) | \
122 INT_MASK(INT_GPV) | \
123 INT_MASK(INT_IDN_ACCESS) | \
124 INT_MASK(INT_UDN_ACCESS) | \
125 INT_MASK(INT_SWINT_3) | \
126 INT_MASK(INT_SWINT_2) | \
127 INT_MASK(INT_SWINT_1) | \
128 INT_MASK(INT_SWINT_0) | \
129 INT_MASK(INT_ILL_TRANS) | \
130 INT_MASK(INT_UNALIGN_DATA) | \
131 INT_MASK(INT_DTLB_MISS) | \
132 INT_MASK(INT_DTLB_ACCESS) | \
133 0)
134#define CRITICAL_MASKED_INTERRUPTS ( \
135 INT_MASK(INT_MEM_ERROR) | \
136 INT_MASK(INT_SINGLE_STEP_3) | \
137 INT_MASK(INT_SINGLE_STEP_2) | \
138 INT_MASK(INT_SINGLE_STEP_1) | \
139 INT_MASK(INT_SINGLE_STEP_0) | \
140 INT_MASK(INT_IDN_COMPLETE) | \
141 INT_MASK(INT_UDN_COMPLETE) | \
142 INT_MASK(INT_IDN_FIREWALL) | \
143 INT_MASK(INT_UDN_FIREWALL) | \
144 INT_MASK(INT_TILE_TIMER) | \
145 INT_MASK(INT_AUX_TILE_TIMER) | \
146 INT_MASK(INT_IDN_TIMER) | \
147 INT_MASK(INT_UDN_TIMER) | \
148 INT_MASK(INT_IDN_AVAIL) | \
149 INT_MASK(INT_UDN_AVAIL) | \
150 INT_MASK(INT_IPI_3) | \
151 INT_MASK(INT_IPI_2) | \
152 INT_MASK(INT_IPI_1) | \
153 INT_MASK(INT_IPI_0) | \
154 INT_MASK(INT_PERF_COUNT) | \
155 INT_MASK(INT_AUX_PERF_COUNT) | \
156 INT_MASK(INT_INTCTRL_3) | \
157 INT_MASK(INT_INTCTRL_2) | \
158 INT_MASK(INT_INTCTRL_1) | \
159 INT_MASK(INT_INTCTRL_0) | \
160 0)
161#define CRITICAL_UNMASKED_INTERRUPTS ( \
162 INT_MASK(INT_ITLB_MISS) | \
163 INT_MASK(INT_ILL) | \
164 INT_MASK(INT_GPV) | \
165 INT_MASK(INT_IDN_ACCESS) | \
166 INT_MASK(INT_UDN_ACCESS) | \
167 INT_MASK(INT_SWINT_3) | \
168 INT_MASK(INT_SWINT_2) | \
169 INT_MASK(INT_SWINT_1) | \
170 INT_MASK(INT_SWINT_0) | \
171 INT_MASK(INT_ILL_TRANS) | \
172 INT_MASK(INT_UNALIGN_DATA) | \
173 INT_MASK(INT_DTLB_MISS) | \
174 INT_MASK(INT_DTLB_ACCESS) | \
175 INT_MASK(INT_BOOT_ACCESS) | \
176 INT_MASK(INT_WORLD_ACCESS) | \
177 INT_MASK(INT_I_ASID) | \
178 INT_MASK(INT_D_ASID) | \
179 INT_MASK(INT_DOUBLE_FAULT) | \
180 0)
181#define MASKABLE_INTERRUPTS ( \
182 INT_MASK(INT_MEM_ERROR) | \
183 INT_MASK(INT_SINGLE_STEP_3) | \
184 INT_MASK(INT_SINGLE_STEP_2) | \
185 INT_MASK(INT_SINGLE_STEP_1) | \
186 INT_MASK(INT_SINGLE_STEP_0) | \
187 INT_MASK(INT_IDN_COMPLETE) | \
188 INT_MASK(INT_UDN_COMPLETE) | \
189 INT_MASK(INT_IDN_FIREWALL) | \
190 INT_MASK(INT_UDN_FIREWALL) | \
191 INT_MASK(INT_TILE_TIMER) | \
192 INT_MASK(INT_AUX_TILE_TIMER) | \
193 INT_MASK(INT_IDN_TIMER) | \
194 INT_MASK(INT_UDN_TIMER) | \
195 INT_MASK(INT_IDN_AVAIL) | \
196 INT_MASK(INT_UDN_AVAIL) | \
197 INT_MASK(INT_IPI_3) | \
198 INT_MASK(INT_IPI_2) | \
199 INT_MASK(INT_IPI_1) | \
200 INT_MASK(INT_IPI_0) | \
201 INT_MASK(INT_PERF_COUNT) | \
202 INT_MASK(INT_AUX_PERF_COUNT) | \
203 INT_MASK(INT_INTCTRL_3) | \
204 INT_MASK(INT_INTCTRL_2) | \
205 INT_MASK(INT_INTCTRL_1) | \
206 INT_MASK(INT_INTCTRL_0) | \
207 0)
208#define UNMASKABLE_INTERRUPTS ( \
209 INT_MASK(INT_ITLB_MISS) | \
210 INT_MASK(INT_ILL) | \
211 INT_MASK(INT_GPV) | \
212 INT_MASK(INT_IDN_ACCESS) | \
213 INT_MASK(INT_UDN_ACCESS) | \
214 INT_MASK(INT_SWINT_3) | \
215 INT_MASK(INT_SWINT_2) | \
216 INT_MASK(INT_SWINT_1) | \
217 INT_MASK(INT_SWINT_0) | \
218 INT_MASK(INT_ILL_TRANS) | \
219 INT_MASK(INT_UNALIGN_DATA) | \
220 INT_MASK(INT_DTLB_MISS) | \
221 INT_MASK(INT_DTLB_ACCESS) | \
222 INT_MASK(INT_BOOT_ACCESS) | \
223 INT_MASK(INT_WORLD_ACCESS) | \
224 INT_MASK(INT_I_ASID) | \
225 INT_MASK(INT_D_ASID) | \
226 INT_MASK(INT_DOUBLE_FAULT) | \
227 0)
228#define SYNC_INTERRUPTS ( \
229 INT_MASK(INT_SINGLE_STEP_3) | \
230 INT_MASK(INT_SINGLE_STEP_2) | \
231 INT_MASK(INT_SINGLE_STEP_1) | \
232 INT_MASK(INT_SINGLE_STEP_0) | \
233 INT_MASK(INT_IDN_COMPLETE) | \
234 INT_MASK(INT_UDN_COMPLETE) | \
235 INT_MASK(INT_ITLB_MISS) | \
236 INT_MASK(INT_ILL) | \
237 INT_MASK(INT_GPV) | \
238 INT_MASK(INT_IDN_ACCESS) | \
239 INT_MASK(INT_UDN_ACCESS) | \
240 INT_MASK(INT_SWINT_3) | \
241 INT_MASK(INT_SWINT_2) | \
242 INT_MASK(INT_SWINT_1) | \
243 INT_MASK(INT_SWINT_0) | \
244 INT_MASK(INT_ILL_TRANS) | \
245 INT_MASK(INT_UNALIGN_DATA) | \
246 INT_MASK(INT_DTLB_MISS) | \
247 INT_MASK(INT_DTLB_ACCESS) | \
248 0)
249#define NON_SYNC_INTERRUPTS ( \
250 INT_MASK(INT_MEM_ERROR) | \
251 INT_MASK(INT_IDN_FIREWALL) | \
252 INT_MASK(INT_UDN_FIREWALL) | \
253 INT_MASK(INT_TILE_TIMER) | \
254 INT_MASK(INT_AUX_TILE_TIMER) | \
255 INT_MASK(INT_IDN_TIMER) | \
256 INT_MASK(INT_UDN_TIMER) | \
257 INT_MASK(INT_IDN_AVAIL) | \
258 INT_MASK(INT_UDN_AVAIL) | \
259 INT_MASK(INT_IPI_3) | \
260 INT_MASK(INT_IPI_2) | \
261 INT_MASK(INT_IPI_1) | \
262 INT_MASK(INT_IPI_0) | \
263 INT_MASK(INT_PERF_COUNT) | \
264 INT_MASK(INT_AUX_PERF_COUNT) | \
265 INT_MASK(INT_INTCTRL_3) | \
266 INT_MASK(INT_INTCTRL_2) | \
267 INT_MASK(INT_INTCTRL_1) | \
268 INT_MASK(INT_INTCTRL_0) | \
269 INT_MASK(INT_BOOT_ACCESS) | \
270 INT_MASK(INT_WORLD_ACCESS) | \
271 INT_MASK(INT_I_ASID) | \
272 INT_MASK(INT_D_ASID) | \
273 INT_MASK(INT_DOUBLE_FAULT) | \
274 0)
275#endif /* !__ASSEMBLER__ */
276#endif /* !__ARCH_INTERRUPTS_H__ */
diff --git a/arch/tile/include/arch/spr_def.h b/arch/tile/include/arch/spr_def.h
index 442fcba0d122..f548efeb2de3 100644
--- a/arch/tile/include/arch/spr_def.h
+++ b/arch/tile/include/arch/spr_def.h
@@ -12,6 +12,15 @@
12 * more details. 12 * more details.
13 */ 13 */
14 14
15/* Include the proper base SPR definition file. */
16#ifdef __tilegx__
17#include <arch/spr_def_64.h>
18#else
19#include <arch/spr_def_32.h>
20#endif
21
22#ifdef __KERNEL__
23
15/* 24/*
16 * In addition to including the proper base SPR definition file, depending 25 * In addition to including the proper base SPR definition file, depending
17 * on machine architecture, this file defines several macros which allow 26 * on machine architecture, this file defines several macros which allow
@@ -29,7 +38,6 @@
29#define _concat4(a, b, c, d) __concat4(a, b, c, d) 38#define _concat4(a, b, c, d) __concat4(a, b, c, d)
30 39
31#ifdef __tilegx__ 40#ifdef __tilegx__
32#include <arch/spr_def_64.h>
33 41
34/* TILE-Gx dependent, protection-level dependent SPRs. */ 42/* TILE-Gx dependent, protection-level dependent SPRs. */
35 43
@@ -65,7 +73,6 @@
65 _concat4(INT_SINGLE_STEP_, CONFIG_KERNEL_PL,,) 73 _concat4(INT_SINGLE_STEP_, CONFIG_KERNEL_PL,,)
66 74
67#else 75#else
68#include <arch/spr_def_32.h>
69 76
70/* TILEPro dependent, protection-level dependent SPRs. */ 77/* TILEPro dependent, protection-level dependent SPRs. */
71 78
@@ -102,3 +109,5 @@
102 _concat4(SPR_INTCTRL_, CONFIG_KERNEL_PL, _STATUS,) 109 _concat4(SPR_INTCTRL_, CONFIG_KERNEL_PL, _STATUS,)
103#define INT_INTCTRL_K \ 110#define INT_INTCTRL_K \
104 _concat4(INT_INTCTRL_, CONFIG_KERNEL_PL,,) 111 _concat4(INT_INTCTRL_, CONFIG_KERNEL_PL,,)
112
113#endif /* __KERNEL__ */
diff --git a/arch/tile/include/arch/spr_def_64.h b/arch/tile/include/arch/spr_def_64.h
new file mode 100644
index 000000000000..cd3e5f95d5fd
--- /dev/null
+++ b/arch/tile/include/arch/spr_def_64.h
@@ -0,0 +1,173 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#ifndef __DOXYGEN__
16
17#ifndef __ARCH_SPR_DEF_H__
18#define __ARCH_SPR_DEF_H__
19
20#define SPR_AUX_PERF_COUNT_0 0x2105
21#define SPR_AUX_PERF_COUNT_1 0x2106
22#define SPR_AUX_PERF_COUNT_CTL 0x2107
23#define SPR_AUX_PERF_COUNT_STS 0x2108
24#define SPR_CMPEXCH_VALUE 0x2780
25#define SPR_CYCLE 0x2781
26#define SPR_DONE 0x2705
27#define SPR_DSTREAM_PF 0x2706
28#define SPR_EVENT_BEGIN 0x2782
29#define SPR_EVENT_END 0x2783
30#define SPR_EX_CONTEXT_0_0 0x2580
31#define SPR_EX_CONTEXT_0_1 0x2581
32#define SPR_EX_CONTEXT_0_1__PL_SHIFT 0
33#define SPR_EX_CONTEXT_0_1__PL_RMASK 0x3
34#define SPR_EX_CONTEXT_0_1__PL_MASK 0x3
35#define SPR_EX_CONTEXT_0_1__ICS_SHIFT 2
36#define SPR_EX_CONTEXT_0_1__ICS_RMASK 0x1
37#define SPR_EX_CONTEXT_0_1__ICS_MASK 0x4
38#define SPR_EX_CONTEXT_1_0 0x2480
39#define SPR_EX_CONTEXT_1_1 0x2481
40#define SPR_EX_CONTEXT_1_1__PL_SHIFT 0
41#define SPR_EX_CONTEXT_1_1__PL_RMASK 0x3
42#define SPR_EX_CONTEXT_1_1__PL_MASK 0x3
43#define SPR_EX_CONTEXT_1_1__ICS_SHIFT 2
44#define SPR_EX_CONTEXT_1_1__ICS_RMASK 0x1
45#define SPR_EX_CONTEXT_1_1__ICS_MASK 0x4
46#define SPR_EX_CONTEXT_2_0 0x2380
47#define SPR_EX_CONTEXT_2_1 0x2381
48#define SPR_EX_CONTEXT_2_1__PL_SHIFT 0
49#define SPR_EX_CONTEXT_2_1__PL_RMASK 0x3
50#define SPR_EX_CONTEXT_2_1__PL_MASK 0x3
51#define SPR_EX_CONTEXT_2_1__ICS_SHIFT 2
52#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
53#define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4
54#define SPR_FAIL 0x2707
55#define SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK 0x1
56#define SPR_INTCTRL_0_STATUS 0x2505
57#define SPR_INTCTRL_1_STATUS 0x2405
58#define SPR_INTCTRL_2_STATUS 0x2305
59#define SPR_INTERRUPT_CRITICAL_SECTION 0x2708
60#define SPR_INTERRUPT_MASK_0 0x2506
61#define SPR_INTERRUPT_MASK_1 0x2406
62#define SPR_INTERRUPT_MASK_2 0x2306
63#define SPR_INTERRUPT_MASK_RESET_0 0x2507
64#define SPR_INTERRUPT_MASK_RESET_1 0x2407
65#define SPR_INTERRUPT_MASK_RESET_2 0x2307
66#define SPR_INTERRUPT_MASK_SET_0 0x2508
67#define SPR_INTERRUPT_MASK_SET_1 0x2408
68#define SPR_INTERRUPT_MASK_SET_2 0x2308
69#define SPR_INTERRUPT_VECTOR_BASE_0 0x2509
70#define SPR_INTERRUPT_VECTOR_BASE_1 0x2409
71#define SPR_INTERRUPT_VECTOR_BASE_2 0x2309
72#define SPR_INTERRUPT_VECTOR_BASE_3 0x2209
73#define SPR_IPI_EVENT_0 0x1f05
74#define SPR_IPI_EVENT_1 0x1e05
75#define SPR_IPI_EVENT_2 0x1d05
76#define SPR_IPI_EVENT_RESET_0 0x1f06
77#define SPR_IPI_EVENT_RESET_1 0x1e06
78#define SPR_IPI_EVENT_RESET_2 0x1d06
79#define SPR_IPI_EVENT_SET_0 0x1f07
80#define SPR_IPI_EVENT_SET_1 0x1e07
81#define SPR_IPI_EVENT_SET_2 0x1d07
82#define SPR_IPI_MASK_0 0x1f08
83#define SPR_IPI_MASK_1 0x1e08
84#define SPR_IPI_MASK_2 0x1d08
85#define SPR_IPI_MASK_RESET_0 0x1f09
86#define SPR_IPI_MASK_RESET_1 0x1e09
87#define SPR_IPI_MASK_RESET_2 0x1d09
88#define SPR_IPI_MASK_SET_0 0x1f0a
89#define SPR_IPI_MASK_SET_1 0x1e0a
90#define SPR_IPI_MASK_SET_2 0x1d0a
91#define SPR_MPL_AUX_TILE_TIMER_SET_0 0x1700
92#define SPR_MPL_AUX_TILE_TIMER_SET_1 0x1701
93#define SPR_MPL_AUX_TILE_TIMER_SET_2 0x1702
94#define SPR_MPL_INTCTRL_0_SET_0 0x2500
95#define SPR_MPL_INTCTRL_0_SET_1 0x2501
96#define SPR_MPL_INTCTRL_0_SET_2 0x2502
97#define SPR_MPL_INTCTRL_1_SET_0 0x2400
98#define SPR_MPL_INTCTRL_1_SET_1 0x2401
99#define SPR_MPL_INTCTRL_1_SET_2 0x2402
100#define SPR_MPL_INTCTRL_2_SET_0 0x2300
101#define SPR_MPL_INTCTRL_2_SET_1 0x2301
102#define SPR_MPL_INTCTRL_2_SET_2 0x2302
103#define SPR_MPL_UDN_ACCESS_SET_0 0x0b00
104#define SPR_MPL_UDN_ACCESS_SET_1 0x0b01
105#define SPR_MPL_UDN_ACCESS_SET_2 0x0b02
106#define SPR_MPL_UDN_AVAIL_SET_0 0x1b00
107#define SPR_MPL_UDN_AVAIL_SET_1 0x1b01
108#define SPR_MPL_UDN_AVAIL_SET_2 0x1b02
109#define SPR_MPL_UDN_COMPLETE_SET_0 0x0600
110#define SPR_MPL_UDN_COMPLETE_SET_1 0x0601
111#define SPR_MPL_UDN_COMPLETE_SET_2 0x0602
112#define SPR_MPL_UDN_FIREWALL_SET_0 0x1500
113#define SPR_MPL_UDN_FIREWALL_SET_1 0x1501
114#define SPR_MPL_UDN_FIREWALL_SET_2 0x1502
115#define SPR_MPL_UDN_TIMER_SET_0 0x1900
116#define SPR_MPL_UDN_TIMER_SET_1 0x1901
117#define SPR_MPL_UDN_TIMER_SET_2 0x1902
118#define SPR_MPL_WORLD_ACCESS_SET_0 0x2700
119#define SPR_MPL_WORLD_ACCESS_SET_1 0x2701
120#define SPR_MPL_WORLD_ACCESS_SET_2 0x2702
121#define SPR_PASS 0x2709
122#define SPR_PERF_COUNT_0 0x2005
123#define SPR_PERF_COUNT_1 0x2006
124#define SPR_PERF_COUNT_CTL 0x2007
125#define SPR_PERF_COUNT_DN_CTL 0x2008
126#define SPR_PERF_COUNT_STS 0x2009
127#define SPR_PROC_STATUS 0x2784
128#define SPR_SIM_CONTROL 0x2785
129#define SPR_SINGLE_STEP_CONTROL_0 0x0405
130#define SPR_SINGLE_STEP_CONTROL_0__CANCELED_MASK 0x1
131#define SPR_SINGLE_STEP_CONTROL_0__INHIBIT_MASK 0x2
132#define SPR_SINGLE_STEP_CONTROL_1 0x0305
133#define SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK 0x1
134#define SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK 0x2
135#define SPR_SINGLE_STEP_CONTROL_2 0x0205
136#define SPR_SINGLE_STEP_CONTROL_2__CANCELED_MASK 0x1
137#define SPR_SINGLE_STEP_CONTROL_2__INHIBIT_MASK 0x2
138#define SPR_SINGLE_STEP_EN_0_0 0x250a
139#define SPR_SINGLE_STEP_EN_0_1 0x240a
140#define SPR_SINGLE_STEP_EN_0_2 0x230a
141#define SPR_SINGLE_STEP_EN_1_0 0x250b
142#define SPR_SINGLE_STEP_EN_1_1 0x240b
143#define SPR_SINGLE_STEP_EN_1_2 0x230b
144#define SPR_SINGLE_STEP_EN_2_0 0x250c
145#define SPR_SINGLE_STEP_EN_2_1 0x240c
146#define SPR_SINGLE_STEP_EN_2_2 0x230c
147#define SPR_SYSTEM_SAVE_0_0 0x2582
148#define SPR_SYSTEM_SAVE_0_1 0x2583
149#define SPR_SYSTEM_SAVE_0_2 0x2584
150#define SPR_SYSTEM_SAVE_0_3 0x2585
151#define SPR_SYSTEM_SAVE_1_0 0x2482
152#define SPR_SYSTEM_SAVE_1_1 0x2483
153#define SPR_SYSTEM_SAVE_1_2 0x2484
154#define SPR_SYSTEM_SAVE_1_3 0x2485
155#define SPR_SYSTEM_SAVE_2_0 0x2382
156#define SPR_SYSTEM_SAVE_2_1 0x2383
157#define SPR_SYSTEM_SAVE_2_2 0x2384
158#define SPR_SYSTEM_SAVE_2_3 0x2385
159#define SPR_TILE_COORD 0x270b
160#define SPR_TILE_RTF_HWM 0x270c
161#define SPR_TILE_TIMER_CONTROL 0x1605
162#define SPR_UDN_AVAIL_EN 0x1b05
163#define SPR_UDN_DATA_AVAIL 0x0b80
164#define SPR_UDN_DEADLOCK_TIMEOUT 0x1906
165#define SPR_UDN_DEMUX_COUNT_0 0x0b05
166#define SPR_UDN_DEMUX_COUNT_1 0x0b06
167#define SPR_UDN_DEMUX_COUNT_2 0x0b07
168#define SPR_UDN_DEMUX_COUNT_3 0x0b08
169#define SPR_UDN_DIRECTION_PROTECT 0x1505
170
171#endif /* !defined(__ARCH_SPR_DEF_H__) */
172
173#endif /* !defined(__DOXYGEN__) */
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index 75a16028a952..739cfe0499d1 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -130,17 +130,52 @@ static inline int atomic_read(const atomic_t *v)
130 */ 130 */
131#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 131#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
132 132
133
134/*
135 * We define xchg() and cmpxchg() in the included headers.
136 * Note that we do not define __HAVE_ARCH_CMPXCHG, since that would imply
137 * that cmpxchg() is an efficient operation, which is not particularly true.
138 */
139
140/* Nonexistent functions intended to cause link errors. */ 133/* Nonexistent functions intended to cause link errors. */
141extern unsigned long __xchg_called_with_bad_pointer(void); 134extern unsigned long __xchg_called_with_bad_pointer(void);
142extern unsigned long __cmpxchg_called_with_bad_pointer(void); 135extern unsigned long __cmpxchg_called_with_bad_pointer(void);
143 136
137#define xchg(ptr, x) \
138 ({ \
139 typeof(*(ptr)) __x; \
140 switch (sizeof(*(ptr))) { \
141 case 4: \
142 __x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \
143 (atomic_t *)(ptr), \
144 (u32)(typeof((x)-(x)))(x)); \
145 break; \
146 case 8: \
147 __x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \
148 (atomic64_t *)(ptr), \
149 (u64)(typeof((x)-(x)))(x)); \
150 break; \
151 default: \
152 __xchg_called_with_bad_pointer(); \
153 } \
154 __x; \
155 })
156
157#define cmpxchg(ptr, o, n) \
158 ({ \
159 typeof(*(ptr)) __x; \
160 switch (sizeof(*(ptr))) { \
161 case 4: \
162 __x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \
163 (atomic_t *)(ptr), \
164 (u32)(typeof((o)-(o)))(o), \
165 (u32)(typeof((n)-(n)))(n)); \
166 break; \
167 case 8: \
168 __x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \
169 (atomic64_t *)(ptr), \
170 (u64)(typeof((o)-(o)))(o), \
171 (u64)(typeof((n)-(n)))(n)); \
172 break; \
173 default: \
174 __cmpxchg_called_with_bad_pointer(); \
175 } \
176 __x; \
177 })
178
144#define tas(ptr) (xchg((ptr), 1)) 179#define tas(ptr) (xchg((ptr), 1))
145 180
146#endif /* __ASSEMBLY__ */ 181#endif /* __ASSEMBLY__ */
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index ed359aee8837..92a8bee32311 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -110,16 +110,6 @@ static inline void atomic_set(atomic_t *v, int n)
110 _atomic_xchg(v, n); 110 _atomic_xchg(v, n);
111} 111}
112 112
113#define xchg(ptr, x) ((typeof(*(ptr))) \
114 ((sizeof(*(ptr)) == sizeof(atomic_t)) ? \
115 atomic_xchg((atomic_t *)(ptr), (long)(x)) : \
116 __xchg_called_with_bad_pointer()))
117
118#define cmpxchg(ptr, o, n) ((typeof(*(ptr))) \
119 ((sizeof(*(ptr)) == sizeof(atomic_t)) ? \
120 atomic_cmpxchg((atomic_t *)(ptr), (long)(o), (long)(n)) : \
121 __cmpxchg_called_with_bad_pointer()))
122
123/* A 64bit atomic type */ 113/* A 64bit atomic type */
124 114
125typedef struct { 115typedef struct {
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
new file mode 100644
index 000000000000..1c1e60d8ccb6
--- /dev/null
+++ b/arch/tile/include/asm/atomic_64.h
@@ -0,0 +1,156 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * Do not include directly; use <asm/atomic.h>.
15 */
16
17#ifndef _ASM_TILE_ATOMIC_64_H
18#define _ASM_TILE_ATOMIC_64_H
19
20#ifndef __ASSEMBLY__
21
22#include <arch/spr_def.h>
23
24/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */
25
26#define atomic_set(v, i) ((v)->counter = (i))
27
28/*
29 * The smp_mb() operations throughout are to support the fact that
30 * Linux requires memory barriers before and after the operation,
31 * on any routine which updates memory and returns a value.
32 */
33
34static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
35{
36 int val;
37 __insn_mtspr(SPR_CMPEXCH_VALUE, o);
38 smp_mb(); /* barrier for proper semantics */
39 val = __insn_cmpexch4((void *)&v->counter, n);
40 smp_mb(); /* barrier for proper semantics */
41 return val;
42}
43
44static inline int atomic_xchg(atomic_t *v, int n)
45{
46 int val;
47 smp_mb(); /* barrier for proper semantics */
48 val = __insn_exch4((void *)&v->counter, n);
49 smp_mb(); /* barrier for proper semantics */
50 return val;
51}
52
53static inline void atomic_add(int i, atomic_t *v)
54{
55 __insn_fetchadd4((void *)&v->counter, i);
56}
57
58static inline int atomic_add_return(int i, atomic_t *v)
59{
60 int val;
61 smp_mb(); /* barrier for proper semantics */
62 val = __insn_fetchadd4((void *)&v->counter, i) + i;
63 barrier(); /* the "+ i" above will wait on memory */
64 return val;
65}
66
67static inline int atomic_add_unless(atomic_t *v, int a, int u)
68{
69 int guess, oldval = v->counter;
70 do {
71 if (oldval == u)
72 break;
73 guess = oldval;
74 oldval = atomic_cmpxchg(v, guess, guess + a);
75 } while (guess != oldval);
76 return oldval != u;
77}
78
79/* Now the true 64-bit operations. */
80
81#define ATOMIC64_INIT(i) { (i) }
82
83#define atomic64_read(v) ((v)->counter)
84#define atomic64_set(v, i) ((v)->counter = (i))
85
86static inline long atomic64_cmpxchg(atomic64_t *v, long o, long n)
87{
88 long val;
89 smp_mb(); /* barrier for proper semantics */
90 __insn_mtspr(SPR_CMPEXCH_VALUE, o);
91 val = __insn_cmpexch((void *)&v->counter, n);
92 smp_mb(); /* barrier for proper semantics */
93 return val;
94}
95
96static inline long atomic64_xchg(atomic64_t *v, long n)
97{
98 long val;
99 smp_mb(); /* barrier for proper semantics */
100 val = __insn_exch((void *)&v->counter, n);
101 smp_mb(); /* barrier for proper semantics */
102 return val;
103}
104
105static inline void atomic64_add(long i, atomic64_t *v)
106{
107 __insn_fetchadd((void *)&v->counter, i);
108}
109
110static inline long atomic64_add_return(long i, atomic64_t *v)
111{
112 int val;
113 smp_mb(); /* barrier for proper semantics */
114 val = __insn_fetchadd((void *)&v->counter, i) + i;
115 barrier(); /* the "+ i" above will wait on memory */
116 return val;
117}
118
119static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
120{
121 long guess, oldval = v->counter;
122 do {
123 if (oldval == u)
124 break;
125 guess = oldval;
126 oldval = atomic64_cmpxchg(v, guess, guess + a);
127 } while (guess != oldval);
128 return oldval != u;
129}
130
131#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
132#define atomic64_sub(i, v) atomic64_add(-(i), (v))
133#define atomic64_inc_return(v) atomic64_add_return(1, (v))
134#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
135#define atomic64_inc(v) atomic64_add(1, (v))
136#define atomic64_dec(v) atomic64_sub(1, (v))
137
138#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
139#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
140#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
141#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0)
142
143#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
144
145/* Atomic dec and inc don't implement barrier, so provide them if needed. */
146#define smp_mb__before_atomic_dec() smp_mb()
147#define smp_mb__after_atomic_dec() smp_mb()
148#define smp_mb__before_atomic_inc() smp_mb()
149#define smp_mb__after_atomic_inc() smp_mb()
150
151/* Define this to indicate that cmpxchg is an efficient operation. */
152#define __HAVE_ARCH_CMPXCHG
153
154#endif /* !__ASSEMBLY__ */
155
156#endif /* _ASM_TILE_ATOMIC_64_H */
diff --git a/arch/tile/include/asm/backtrace.h b/arch/tile/include/asm/backtrace.h
index f18887d82399..bd5399a69edf 100644
--- a/arch/tile/include/asm/backtrace.h
+++ b/arch/tile/include/asm/backtrace.h
@@ -12,80 +12,41 @@
12 * more details. 12 * more details.
13 */ 13 */
14 14
15#ifndef _TILE_BACKTRACE_H 15#ifndef _ASM_TILE_BACKTRACE_H
16#define _TILE_BACKTRACE_H 16#define _ASM_TILE_BACKTRACE_H
17
18
19 17
20#include <linux/types.h> 18#include <linux/types.h>
21 19
22#include <arch/chip.h> 20/* Reads 'size' bytes from 'address' and writes the data to 'result'.
23
24#if defined(__tile__)
25typedef unsigned long VirtualAddress;
26#elif CHIP_VA_WIDTH() > 32
27typedef unsigned long long VirtualAddress;
28#else
29typedef unsigned int VirtualAddress;
30#endif
31
32
33/** Reads 'size' bytes from 'address' and writes the data to 'result'.
34 * Returns true if successful, else false (e.g. memory not readable). 21 * Returns true if successful, else false (e.g. memory not readable).
35 */ 22 */
36typedef bool (*BacktraceMemoryReader)(void *result, 23typedef bool (*BacktraceMemoryReader)(void *result,
37 VirtualAddress address, 24 unsigned long address,
38 unsigned int size, 25 unsigned int size,
39 void *extra); 26 void *extra);
40 27
41typedef struct { 28typedef struct {
42 /** Current PC. */ 29 /* Current PC. */
43 VirtualAddress pc; 30 unsigned long pc;
44 31
45 /** Current stack pointer value. */ 32 /* Current stack pointer value. */
46 VirtualAddress sp; 33 unsigned long sp;
47 34
48 /** Current frame pointer value (i.e. caller's stack pointer) */ 35 /* Current frame pointer value (i.e. caller's stack pointer) */
49 VirtualAddress fp; 36 unsigned long fp;
50 37
51 /** Internal use only: caller's PC for first frame. */ 38 /* Internal use only: caller's PC for first frame. */
52 VirtualAddress initial_frame_caller_pc; 39 unsigned long initial_frame_caller_pc;
53 40
54 /** Internal use only: callback to read memory. */ 41 /* Internal use only: callback to read memory. */
55 BacktraceMemoryReader read_memory_func; 42 BacktraceMemoryReader read_memory_func;
56 43
57 /** Internal use only: arbitrary argument to read_memory_func. */ 44 /* Internal use only: arbitrary argument to read_memory_func. */
58 void *read_memory_func_extra; 45 void *read_memory_func_extra;
59 46
60} BacktraceIterator; 47} BacktraceIterator;
61 48
62 49
63/** Initializes a backtracer to start from the given location.
64 *
65 * If the frame pointer cannot be determined it is set to -1.
66 *
67 * @param state The state to be filled in.
68 * @param read_memory_func A callback that reads memory. If NULL, a default
69 * value is provided.
70 * @param read_memory_func_extra An arbitrary argument to read_memory_func.
71 * @param pc The current PC.
72 * @param lr The current value of the 'lr' register.
73 * @param sp The current value of the 'sp' register.
74 * @param r52 The current value of the 'r52' register.
75 */
76extern void backtrace_init(BacktraceIterator *state,
77 BacktraceMemoryReader read_memory_func,
78 void *read_memory_func_extra,
79 VirtualAddress pc, VirtualAddress lr,
80 VirtualAddress sp, VirtualAddress r52);
81
82
83/** Advances the backtracing state to the calling frame, returning
84 * true iff successful.
85 */
86extern bool backtrace_next(BacktraceIterator *state);
87
88
89typedef enum { 50typedef enum {
90 51
91 /* We have no idea what the caller's pc is. */ 52 /* We have no idea what the caller's pc is. */
@@ -138,7 +99,7 @@ enum {
138}; 99};
139 100
140 101
141/** Internal constants used to define 'info' operands. */ 102/* Internal constants used to define 'info' operands. */
142enum { 103enum {
143 /* 0 and 1 are reserved, as are all negative numbers. */ 104 /* 0 and 1 are reserved, as are all negative numbers. */
144 105
@@ -147,13 +108,10 @@ enum {
147 CALLER_SP_IN_R52_BASE = 4, 108 CALLER_SP_IN_R52_BASE = 4,
148 109
149 CALLER_SP_OFFSET_BASE = 8, 110 CALLER_SP_OFFSET_BASE = 8,
150
151 /* Marks the entry point of certain functions. */
152 ENTRY_POINT_INFO_OP = 16
153}; 111};
154 112
155 113
156/** Current backtracer state describing where it thinks the caller is. */ 114/* Current backtracer state describing where it thinks the caller is. */
157typedef struct { 115typedef struct {
158 /* 116 /*
159 * Public fields 117 * Public fields
@@ -192,7 +150,13 @@ typedef struct {
192 150
193} CallerLocation; 151} CallerLocation;
194 152
153extern void backtrace_init(BacktraceIterator *state,
154 BacktraceMemoryReader read_memory_func,
155 void *read_memory_func_extra,
156 unsigned long pc, unsigned long lr,
157 unsigned long sp, unsigned long r52);
195 158
196 159
160extern bool backtrace_next(BacktraceIterator *state);
197 161
198#endif /* _TILE_BACKTRACE_H */ 162#endif /* _ASM_TILE_BACKTRACE_H */
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
index 132e6bbd07e9..16f1fa51fea1 100644
--- a/arch/tile/include/asm/bitops.h
+++ b/arch/tile/include/asm/bitops.h
@@ -122,6 +122,7 @@ static inline unsigned long __arch_hweight64(__u64 w)
122#include <asm-generic/bitops/lock.h> 122#include <asm-generic/bitops/lock.h>
123#include <asm-generic/bitops/find.h> 123#include <asm-generic/bitops/find.h>
124#include <asm-generic/bitops/sched.h> 124#include <asm-generic/bitops/sched.h>
125#include <asm-generic/bitops/non-atomic.h>
125#include <asm-generic/bitops/le.h> 126#include <asm-generic/bitops/le.h>
126 127
127#endif /* _ASM_TILE_BITOPS_H */ 128#endif /* _ASM_TILE_BITOPS_H */
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h
index 2638be51a164..d31ab905cfa7 100644
--- a/arch/tile/include/asm/bitops_32.h
+++ b/arch/tile/include/asm/bitops_32.h
@@ -126,7 +126,6 @@ static inline int test_and_change_bit(unsigned nr,
126#define smp_mb__before_clear_bit() smp_mb() 126#define smp_mb__before_clear_bit() smp_mb()
127#define smp_mb__after_clear_bit() do {} while (0) 127#define smp_mb__after_clear_bit() do {} while (0)
128 128
129#include <asm-generic/bitops/non-atomic.h>
130#include <asm-generic/bitops/ext2-atomic.h> 129#include <asm-generic/bitops/ext2-atomic.h>
131 130
132#endif /* _ASM_TILE_BITOPS_32_H */ 131#endif /* _ASM_TILE_BITOPS_32_H */
diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
new file mode 100644
index 000000000000..99615e8d2d8b
--- /dev/null
+++ b/arch/tile/include/asm/bitops_64.h
@@ -0,0 +1,105 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#ifndef _ASM_TILE_BITOPS_64_H
16#define _ASM_TILE_BITOPS_64_H
17
18#include <linux/compiler.h>
19#include <asm/atomic.h>
20#include <asm/system.h>
21
22/* See <asm/bitops.h> for API comments. */
23
24static inline void set_bit(unsigned nr, volatile unsigned long *addr)
25{
26 unsigned long mask = (1UL << (nr % BITS_PER_LONG));
27 __insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask);
28}
29
30static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
31{
32 unsigned long mask = (1UL << (nr % BITS_PER_LONG));
33 __insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);
34}
35
36#define smp_mb__before_clear_bit() smp_mb()
37#define smp_mb__after_clear_bit() smp_mb()
38
39
40static inline void change_bit(unsigned nr, volatile unsigned long *addr)
41{
42 unsigned long old, mask = (1UL << (nr % BITS_PER_LONG));
43 long guess, oldval;
44 addr += nr / BITS_PER_LONG;
45 old = *addr;
46 do {
47 guess = oldval;
48 oldval = atomic64_cmpxchg((atomic64_t *)addr,
49 guess, guess ^ mask);
50 } while (guess != oldval);
51}
52
53
54/*
55 * The test_and_xxx_bit() routines require a memory fence before we
56 * start the operation, and after the operation completes. We use
57 * smp_mb() before, and rely on the "!= 0" comparison, plus a compiler
58 * barrier(), to block until the atomic op is complete.
59 */
60
61static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
62{
63 int val;
64 unsigned long mask = (1UL << (nr % BITS_PER_LONG));
65 smp_mb(); /* barrier for proper semantics */
66 val = (__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask)
67 & mask) != 0;
68 barrier();
69 return val;
70}
71
72
73static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
74{
75 int val;
76 unsigned long mask = (1UL << (nr % BITS_PER_LONG));
77 smp_mb(); /* barrier for proper semantics */
78 val = (__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask)
79 & mask) != 0;
80 barrier();
81 return val;
82}
83
84
85static inline int test_and_change_bit(unsigned nr,
86 volatile unsigned long *addr)
87{
88 unsigned long mask = (1UL << (nr % BITS_PER_LONG));
89 long guess, oldval = *addr;
90 addr += nr / BITS_PER_LONG;
91 oldval = *addr;
92 do {
93 guess = oldval;
94 oldval = atomic64_cmpxchg((atomic64_t *)addr,
95 guess, guess ^ mask);
96 } while (guess != oldval);
97 return (oldval & mask) != 0;
98}
99
100#define ext2_set_bit_atomic(lock, nr, addr) \
101 test_and_set_bit((nr), (unsigned long *)(addr))
102#define ext2_clear_bit_atomic(lock, nr, addr) \
103 test_and_clear_bit((nr), (unsigned long *)(addr))
104
105#endif /* _ASM_TILE_BITOPS_64_H */
diff --git a/arch/tile/include/asm/cacheflush.h b/arch/tile/include/asm/cacheflush.h
index 12fb0fb330ee..e925f4bb498f 100644
--- a/arch/tile/include/asm/cacheflush.h
+++ b/arch/tile/include/asm/cacheflush.h
@@ -116,22 +116,28 @@ static inline void __finv_buffer(void *buffer, size_t size)
116} 116}
117 117
118 118
119/* Invalidate a VA range, then memory fence. */ 119/* Invalidate a VA range and wait for it to be complete. */
120static inline void inv_buffer(void *buffer, size_t size) 120static inline void inv_buffer(void *buffer, size_t size)
121{ 121{
122 __inv_buffer(buffer, size); 122 __inv_buffer(buffer, size);
123 mb_incoherent(); 123 mb();
124} 124}
125 125
126/* Flush a VA range, then memory fence. */ 126/*
127static inline void flush_buffer(void *buffer, size_t size) 127 * Flush a locally-homecached VA range and wait for the evicted
128 * cachelines to hit memory.
129 */
130static inline void flush_buffer_local(void *buffer, size_t size)
128{ 131{
129 __flush_buffer(buffer, size); 132 __flush_buffer(buffer, size);
130 mb_incoherent(); 133 mb_incoherent();
131} 134}
132 135
133/* Flush & invalidate a VA range, then memory fence. */ 136/*
134static inline void finv_buffer(void *buffer, size_t size) 137 * Flush and invalidate a locally-homecached VA range and wait for the
138 * evicted cachelines to hit memory.
139 */
140static inline void finv_buffer_local(void *buffer, size_t size)
135{ 141{
136 __finv_buffer(buffer, size); 142 __finv_buffer(buffer, size);
137 mb_incoherent(); 143 mb_incoherent();
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index c3ae570c0a5d..bf95f55b82b0 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -215,8 +215,8 @@ struct compat_sigaction;
215struct compat_siginfo; 215struct compat_siginfo;
216struct compat_sigaltstack; 216struct compat_sigaltstack;
217long compat_sys_execve(const char __user *path, 217long compat_sys_execve(const char __user *path,
218 const compat_uptr_t __user *argv, 218 compat_uptr_t __user *argv,
219 const compat_uptr_t __user *envp, struct pt_regs *); 219 compat_uptr_t __user *envp, struct pt_regs *);
220long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act, 220long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
221 struct compat_sigaction __user *oact, 221 struct compat_sigaction __user *oact,
222 size_t sigsetsize); 222 size_t sigsetsize);
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index 15e1dceecc64..eaa06d175b39 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -65,7 +65,8 @@ extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t,
65extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, 65extern void dma_sync_single_range_for_device(struct device *, dma_addr_t,
66 unsigned long offset, size_t, 66 unsigned long offset, size_t,
67 enum dma_data_direction); 67 enum dma_data_direction);
68extern void dma_cache_sync(void *vaddr, size_t, enum dma_data_direction); 68extern void dma_cache_sync(struct device *dev, void *vaddr, size_t,
69 enum dma_data_direction);
69 70
70static inline int 71static inline int
71dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 72dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
diff --git a/arch/tile/include/asm/fb.h b/arch/tile/include/asm/fb.h
new file mode 100644
index 000000000000..3a4988e8df45
--- /dev/null
+++ b/arch/tile/include/asm/fb.h
@@ -0,0 +1 @@
#include <asm-generic/fb.h>
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index d3cbb9b14cbe..c9ea1652af03 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -52,6 +52,7 @@ extern void iounmap(volatile void __iomem *addr);
52#endif 52#endif
53 53
54#define ioremap_nocache(physaddr, size) ioremap(physaddr, size) 54#define ioremap_nocache(physaddr, size) ioremap(physaddr, size)
55#define ioremap_wc(physaddr, size) ioremap(physaddr, size)
55#define ioremap_writethrough(physaddr, size) ioremap(physaddr, size) 56#define ioremap_writethrough(physaddr, size) ioremap(physaddr, size)
56#define ioremap_fullcache(physaddr, size) ioremap(physaddr, size) 57#define ioremap_fullcache(physaddr, size) ioremap(physaddr, size)
57 58
@@ -161,6 +162,15 @@ static inline void _tile_writeq(u64 val, unsigned long addr)
161#define iowrite32 writel 162#define iowrite32 writel
162#define iowrite64 writeq 163#define iowrite64 writeq
163 164
165static inline void memset_io(void *dst, int val, size_t len)
166{
167 int x;
168 BUG_ON((unsigned long)dst & 0x3);
169 val = (val & 0xff) * 0x01010101;
170 for (x = 0; x < len; x += 4)
171 writel(val, dst + x);
172}
173
164static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, 174static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
165 size_t len) 175 size_t len)
166{ 176{
@@ -269,6 +279,11 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
269 ioport_panic(); 279 ioport_panic();
270} 280}
271 281
282#define ioread16be(addr) be16_to_cpu(ioread16(addr))
283#define ioread32be(addr) be32_to_cpu(ioread32(addr))
284#define iowrite16be(v, addr) iowrite16(be16_to_cpu(v), (addr))
285#define iowrite32be(v, addr) iowrite32(be32_to_cpu(v), (addr))
286
272#define ioread8_rep(p, dst, count) \ 287#define ioread8_rep(p, dst, count) \
273 insb((unsigned long) (p), (dst), (count)) 288 insb((unsigned long) (p), (dst), (count))
274#define ioread16_rep(p, dst, count) \ 289#define ioread16_rep(p, dst, count) \
@@ -283,4 +298,7 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
283#define iowrite32_rep(p, src, count) \ 298#define iowrite32_rep(p, src, count) \
284 outsl((unsigned long) (p), (src), (count)) 299 outsl((unsigned long) (p), (src), (count))
285 300
301#define virt_to_bus virt_to_phys
302#define bus_to_virt phys_to_virt
303
286#endif /* _ASM_TILE_IO_H */ 304#endif /* _ASM_TILE_IO_H */
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h
index 572fd3ef1d73..94e9a511de84 100644
--- a/arch/tile/include/asm/irq.h
+++ b/arch/tile/include/asm/irq.h
@@ -23,6 +23,8 @@
23/* IRQ numbers used for linux IPIs. */ 23/* IRQ numbers used for linux IPIs. */
24#define IRQ_RESCHEDULE 1 24#define IRQ_RESCHEDULE 1
25 25
26#define irq_canonicalize(irq) (irq)
27
26void ack_bad_irq(unsigned int irq); 28void ack_bad_irq(unsigned int irq);
27 29
28/* 30/*
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h
index 9bc0d0725c28..15fb24641120 100644
--- a/arch/tile/include/asm/mmu_context.h
+++ b/arch/tile/include/asm/mmu_context.h
@@ -100,8 +100,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
100 __get_cpu_var(current_asid) = asid; 100 __get_cpu_var(current_asid) = asid;
101 101
102 /* Clear cpu from the old mm, and set it in the new one. */ 102 /* Clear cpu from the old mm, and set it in the new one. */
103 cpumask_clear_cpu(cpu, &prev->cpu_vm_mask); 103 cpumask_clear_cpu(cpu, mm_cpumask(prev));
104 cpumask_set_cpu(cpu, &next->cpu_vm_mask); 104 cpumask_set_cpu(cpu, mm_cpumask(next));
105 105
106 /* Re-load page tables */ 106 /* Re-load page tables */
107 install_page_table(next->pgd, asid); 107 install_page_table(next->pgd, asid);
diff --git a/arch/tile/include/asm/opcode-tile_32.h b/arch/tile/include/asm/opcode-tile_32.h
index eda60ecbae3d..03df7b1e77bf 100644
--- a/arch/tile/include/asm/opcode-tile_32.h
+++ b/arch/tile/include/asm/opcode-tile_32.h
@@ -1502,5 +1502,12 @@ extern int parse_insn_tile(tile_bundle_bits bits,
1502 decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]); 1502 decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]);
1503 1503
1504 1504
1505/* Given a set of bundle bits and a specific pipe, returns which
1506 * instruction the bundle contains in that pipe.
1507 */
1508extern const struct tile_opcode *
1509find_opcode(tile_bundle_bits bits, tile_pipeline pipe);
1510
1511
1505 1512
1506#endif /* opcode_tile_h */ 1513#endif /* opcode_tile_h */
diff --git a/arch/tile/include/asm/opcode-tile_64.h b/arch/tile/include/asm/opcode-tile_64.h
index eda60ecbae3d..c0633466cd5c 100644
--- a/arch/tile/include/asm/opcode-tile_64.h
+++ b/arch/tile/include/asm/opcode-tile_64.h
@@ -5,863 +5,711 @@
5#ifndef opcode_tile_h 5#ifndef opcode_tile_h
6#define opcode_tile_h 6#define opcode_tile_h
7 7
8typedef unsigned long long tile_bundle_bits; 8typedef unsigned long long tilegx_bundle_bits;
9 9
10 10
11enum 11enum
12{ 12{
13 TILE_MAX_OPERANDS = 5 /* mm */ 13 TILEGX_MAX_OPERANDS = 4 /* bfexts */
14}; 14};
15 15
16typedef enum 16typedef enum
17{ 17{
18 TILE_OPC_BPT, 18 TILEGX_OPC_BPT,
19 TILE_OPC_INFO, 19 TILEGX_OPC_INFO,
20 TILE_OPC_INFOL, 20 TILEGX_OPC_INFOL,
21 TILE_OPC_J, 21 TILEGX_OPC_MOVE,
22 TILE_OPC_JAL, 22 TILEGX_OPC_MOVEI,
23 TILE_OPC_MOVE, 23 TILEGX_OPC_MOVELI,
24 TILE_OPC_MOVE_SN, 24 TILEGX_OPC_PREFETCH,
25 TILE_OPC_MOVEI, 25 TILEGX_OPC_PREFETCH_ADD_L1,
26 TILE_OPC_MOVEI_SN, 26 TILEGX_OPC_PREFETCH_ADD_L1_FAULT,
27 TILE_OPC_MOVELI, 27 TILEGX_OPC_PREFETCH_ADD_L2,
28 TILE_OPC_MOVELI_SN, 28 TILEGX_OPC_PREFETCH_ADD_L2_FAULT,
29 TILE_OPC_MOVELIS, 29 TILEGX_OPC_PREFETCH_ADD_L3,
30 TILE_OPC_PREFETCH, 30 TILEGX_OPC_PREFETCH_ADD_L3_FAULT,
31 TILE_OPC_RAISE, 31 TILEGX_OPC_PREFETCH_L1,
32 TILE_OPC_ADD, 32 TILEGX_OPC_PREFETCH_L1_FAULT,
33 TILE_OPC_ADD_SN, 33 TILEGX_OPC_PREFETCH_L2,
34 TILE_OPC_ADDB, 34 TILEGX_OPC_PREFETCH_L2_FAULT,
35 TILE_OPC_ADDB_SN, 35 TILEGX_OPC_PREFETCH_L3,
36 TILE_OPC_ADDBS_U, 36 TILEGX_OPC_PREFETCH_L3_FAULT,
37 TILE_OPC_ADDBS_U_SN, 37 TILEGX_OPC_RAISE,
38 TILE_OPC_ADDH, 38 TILEGX_OPC_ADD,
39 TILE_OPC_ADDH_SN, 39 TILEGX_OPC_ADDI,
40 TILE_OPC_ADDHS, 40 TILEGX_OPC_ADDLI,
41 TILE_OPC_ADDHS_SN, 41 TILEGX_OPC_ADDX,
42 TILE_OPC_ADDI, 42 TILEGX_OPC_ADDXI,
43 TILE_OPC_ADDI_SN, 43 TILEGX_OPC_ADDXLI,
44 TILE_OPC_ADDIB, 44 TILEGX_OPC_ADDXSC,
45 TILE_OPC_ADDIB_SN, 45 TILEGX_OPC_AND,
46 TILE_OPC_ADDIH, 46 TILEGX_OPC_ANDI,
47 TILE_OPC_ADDIH_SN, 47 TILEGX_OPC_BEQZ,
48 TILE_OPC_ADDLI, 48 TILEGX_OPC_BEQZT,
49 TILE_OPC_ADDLI_SN, 49 TILEGX_OPC_BFEXTS,
50 TILE_OPC_ADDLIS, 50 TILEGX_OPC_BFEXTU,
51 TILE_OPC_ADDS, 51 TILEGX_OPC_BFINS,
52 TILE_OPC_ADDS_SN, 52 TILEGX_OPC_BGEZ,
53 TILE_OPC_ADIFFB_U, 53 TILEGX_OPC_BGEZT,
54 TILE_OPC_ADIFFB_U_SN, 54 TILEGX_OPC_BGTZ,
55 TILE_OPC_ADIFFH, 55 TILEGX_OPC_BGTZT,
56 TILE_OPC_ADIFFH_SN, 56 TILEGX_OPC_BLBC,
57 TILE_OPC_AND, 57 TILEGX_OPC_BLBCT,
58 TILE_OPC_AND_SN, 58 TILEGX_OPC_BLBS,
59 TILE_OPC_ANDI, 59 TILEGX_OPC_BLBST,
60 TILE_OPC_ANDI_SN, 60 TILEGX_OPC_BLEZ,
61 TILE_OPC_AULI, 61 TILEGX_OPC_BLEZT,
62 TILE_OPC_AVGB_U, 62 TILEGX_OPC_BLTZ,
63 TILE_OPC_AVGB_U_SN, 63 TILEGX_OPC_BLTZT,
64 TILE_OPC_AVGH, 64 TILEGX_OPC_BNEZ,
65 TILE_OPC_AVGH_SN, 65 TILEGX_OPC_BNEZT,
66 TILE_OPC_BBNS, 66 TILEGX_OPC_CLZ,
67 TILE_OPC_BBNS_SN, 67 TILEGX_OPC_CMOVEQZ,
68 TILE_OPC_BBNST, 68 TILEGX_OPC_CMOVNEZ,
69 TILE_OPC_BBNST_SN, 69 TILEGX_OPC_CMPEQ,
70 TILE_OPC_BBS, 70 TILEGX_OPC_CMPEQI,
71 TILE_OPC_BBS_SN, 71 TILEGX_OPC_CMPEXCH,
72 TILE_OPC_BBST, 72 TILEGX_OPC_CMPEXCH4,
73 TILE_OPC_BBST_SN, 73 TILEGX_OPC_CMPLES,
74 TILE_OPC_BGEZ, 74 TILEGX_OPC_CMPLEU,
75 TILE_OPC_BGEZ_SN, 75 TILEGX_OPC_CMPLTS,
76 TILE_OPC_BGEZT, 76 TILEGX_OPC_CMPLTSI,
77 TILE_OPC_BGEZT_SN, 77 TILEGX_OPC_CMPLTU,
78 TILE_OPC_BGZ, 78 TILEGX_OPC_CMPLTUI,
79 TILE_OPC_BGZ_SN, 79 TILEGX_OPC_CMPNE,
80 TILE_OPC_BGZT, 80 TILEGX_OPC_CMUL,
81 TILE_OPC_BGZT_SN, 81 TILEGX_OPC_CMULA,
82 TILE_OPC_BITX, 82 TILEGX_OPC_CMULAF,
83 TILE_OPC_BITX_SN, 83 TILEGX_OPC_CMULF,
84 TILE_OPC_BLEZ, 84 TILEGX_OPC_CMULFR,
85 TILE_OPC_BLEZ_SN, 85 TILEGX_OPC_CMULH,
86 TILE_OPC_BLEZT, 86 TILEGX_OPC_CMULHR,
87 TILE_OPC_BLEZT_SN, 87 TILEGX_OPC_CRC32_32,
88 TILE_OPC_BLZ, 88 TILEGX_OPC_CRC32_8,
89 TILE_OPC_BLZ_SN, 89 TILEGX_OPC_CTZ,
90 TILE_OPC_BLZT, 90 TILEGX_OPC_DBLALIGN,
91 TILE_OPC_BLZT_SN, 91 TILEGX_OPC_DBLALIGN2,
92 TILE_OPC_BNZ, 92 TILEGX_OPC_DBLALIGN4,
93 TILE_OPC_BNZ_SN, 93 TILEGX_OPC_DBLALIGN6,
94 TILE_OPC_BNZT, 94 TILEGX_OPC_DRAIN,
95 TILE_OPC_BNZT_SN, 95 TILEGX_OPC_DTLBPR,
96 TILE_OPC_BYTEX, 96 TILEGX_OPC_EXCH,
97 TILE_OPC_BYTEX_SN, 97 TILEGX_OPC_EXCH4,
98 TILE_OPC_BZ, 98 TILEGX_OPC_FDOUBLE_ADD_FLAGS,
99 TILE_OPC_BZ_SN, 99 TILEGX_OPC_FDOUBLE_ADDSUB,
100 TILE_OPC_BZT, 100 TILEGX_OPC_FDOUBLE_MUL_FLAGS,
101 TILE_OPC_BZT_SN, 101 TILEGX_OPC_FDOUBLE_PACK1,
102 TILE_OPC_CLZ, 102 TILEGX_OPC_FDOUBLE_PACK2,
103 TILE_OPC_CLZ_SN, 103 TILEGX_OPC_FDOUBLE_SUB_FLAGS,
104 TILE_OPC_CRC32_32, 104 TILEGX_OPC_FDOUBLE_UNPACK_MAX,
105 TILE_OPC_CRC32_32_SN, 105 TILEGX_OPC_FDOUBLE_UNPACK_MIN,
106 TILE_OPC_CRC32_8, 106 TILEGX_OPC_FETCHADD,
107 TILE_OPC_CRC32_8_SN, 107 TILEGX_OPC_FETCHADD4,
108 TILE_OPC_CTZ, 108 TILEGX_OPC_FETCHADDGEZ,
109 TILE_OPC_CTZ_SN, 109 TILEGX_OPC_FETCHADDGEZ4,
110 TILE_OPC_DRAIN, 110 TILEGX_OPC_FETCHAND,
111 TILE_OPC_DTLBPR, 111 TILEGX_OPC_FETCHAND4,
112 TILE_OPC_DWORD_ALIGN, 112 TILEGX_OPC_FETCHOR,
113 TILE_OPC_DWORD_ALIGN_SN, 113 TILEGX_OPC_FETCHOR4,
114 TILE_OPC_FINV, 114 TILEGX_OPC_FINV,
115 TILE_OPC_FLUSH, 115 TILEGX_OPC_FLUSH,
116 TILE_OPC_FNOP, 116 TILEGX_OPC_FLUSHWB,
117 TILE_OPC_ICOH, 117 TILEGX_OPC_FNOP,
118 TILE_OPC_ILL, 118 TILEGX_OPC_FSINGLE_ADD1,
119 TILE_OPC_INTHB, 119 TILEGX_OPC_FSINGLE_ADDSUB2,
120 TILE_OPC_INTHB_SN, 120 TILEGX_OPC_FSINGLE_MUL1,
121 TILE_OPC_INTHH, 121 TILEGX_OPC_FSINGLE_MUL2,
122 TILE_OPC_INTHH_SN, 122 TILEGX_OPC_FSINGLE_PACK1,
123 TILE_OPC_INTLB, 123 TILEGX_OPC_FSINGLE_PACK2,
124 TILE_OPC_INTLB_SN, 124 TILEGX_OPC_FSINGLE_SUB1,
125 TILE_OPC_INTLH, 125 TILEGX_OPC_ICOH,
126 TILE_OPC_INTLH_SN, 126 TILEGX_OPC_ILL,
127 TILE_OPC_INV, 127 TILEGX_OPC_INV,
128 TILE_OPC_IRET, 128 TILEGX_OPC_IRET,
129 TILE_OPC_JALB, 129 TILEGX_OPC_J,
130 TILE_OPC_JALF, 130 TILEGX_OPC_JAL,
131 TILE_OPC_JALR, 131 TILEGX_OPC_JALR,
132 TILE_OPC_JALRP, 132 TILEGX_OPC_JALRP,
133 TILE_OPC_JB, 133 TILEGX_OPC_JR,
134 TILE_OPC_JF, 134 TILEGX_OPC_JRP,
135 TILE_OPC_JR, 135 TILEGX_OPC_LD,
136 TILE_OPC_JRP, 136 TILEGX_OPC_LD1S,
137 TILE_OPC_LB, 137 TILEGX_OPC_LD1S_ADD,
138 TILE_OPC_LB_SN, 138 TILEGX_OPC_LD1U,
139 TILE_OPC_LB_U, 139 TILEGX_OPC_LD1U_ADD,
140 TILE_OPC_LB_U_SN, 140 TILEGX_OPC_LD2S,
141 TILE_OPC_LBADD, 141 TILEGX_OPC_LD2S_ADD,
142 TILE_OPC_LBADD_SN, 142 TILEGX_OPC_LD2U,
143 TILE_OPC_LBADD_U, 143 TILEGX_OPC_LD2U_ADD,
144 TILE_OPC_LBADD_U_SN, 144 TILEGX_OPC_LD4S,
145 TILE_OPC_LH, 145 TILEGX_OPC_LD4S_ADD,
146 TILE_OPC_LH_SN, 146 TILEGX_OPC_LD4U,
147 TILE_OPC_LH_U, 147 TILEGX_OPC_LD4U_ADD,
148 TILE_OPC_LH_U_SN, 148 TILEGX_OPC_LD_ADD,
149 TILE_OPC_LHADD, 149 TILEGX_OPC_LDNA,
150 TILE_OPC_LHADD_SN, 150 TILEGX_OPC_LDNA_ADD,
151 TILE_OPC_LHADD_U, 151 TILEGX_OPC_LDNT,
152 TILE_OPC_LHADD_U_SN, 152 TILEGX_OPC_LDNT1S,
153 TILE_OPC_LNK, 153 TILEGX_OPC_LDNT1S_ADD,
154 TILE_OPC_LNK_SN, 154 TILEGX_OPC_LDNT1U,
155 TILE_OPC_LW, 155 TILEGX_OPC_LDNT1U_ADD,
156 TILE_OPC_LW_SN, 156 TILEGX_OPC_LDNT2S,
157 TILE_OPC_LW_NA, 157 TILEGX_OPC_LDNT2S_ADD,
158 TILE_OPC_LW_NA_SN, 158 TILEGX_OPC_LDNT2U,
159 TILE_OPC_LWADD, 159 TILEGX_OPC_LDNT2U_ADD,
160 TILE_OPC_LWADD_SN, 160 TILEGX_OPC_LDNT4S,
161 TILE_OPC_LWADD_NA, 161 TILEGX_OPC_LDNT4S_ADD,
162 TILE_OPC_LWADD_NA_SN, 162 TILEGX_OPC_LDNT4U,
163 TILE_OPC_MAXB_U, 163 TILEGX_OPC_LDNT4U_ADD,
164 TILE_OPC_MAXB_U_SN, 164 TILEGX_OPC_LDNT_ADD,
165 TILE_OPC_MAXH, 165 TILEGX_OPC_LNK,
166 TILE_OPC_MAXH_SN, 166 TILEGX_OPC_MF,
167 TILE_OPC_MAXIB_U, 167 TILEGX_OPC_MFSPR,
168 TILE_OPC_MAXIB_U_SN, 168 TILEGX_OPC_MM,
169 TILE_OPC_MAXIH, 169 TILEGX_OPC_MNZ,
170 TILE_OPC_MAXIH_SN, 170 TILEGX_OPC_MTSPR,
171 TILE_OPC_MF, 171 TILEGX_OPC_MUL_HS_HS,
172 TILE_OPC_MFSPR, 172 TILEGX_OPC_MUL_HS_HU,
173 TILE_OPC_MINB_U, 173 TILEGX_OPC_MUL_HS_LS,
174 TILE_OPC_MINB_U_SN, 174 TILEGX_OPC_MUL_HS_LU,
175 TILE_OPC_MINH, 175 TILEGX_OPC_MUL_HU_HU,
176 TILE_OPC_MINH_SN, 176 TILEGX_OPC_MUL_HU_LS,
177 TILE_OPC_MINIB_U, 177 TILEGX_OPC_MUL_HU_LU,
178 TILE_OPC_MINIB_U_SN, 178 TILEGX_OPC_MUL_LS_LS,
179 TILE_OPC_MINIH, 179 TILEGX_OPC_MUL_LS_LU,
180 TILE_OPC_MINIH_SN, 180 TILEGX_OPC_MUL_LU_LU,
181 TILE_OPC_MM, 181 TILEGX_OPC_MULA_HS_HS,
182 TILE_OPC_MNZ, 182 TILEGX_OPC_MULA_HS_HU,
183 TILE_OPC_MNZ_SN, 183 TILEGX_OPC_MULA_HS_LS,
184 TILE_OPC_MNZB, 184 TILEGX_OPC_MULA_HS_LU,
185 TILE_OPC_MNZB_SN, 185 TILEGX_OPC_MULA_HU_HU,
186 TILE_OPC_MNZH, 186 TILEGX_OPC_MULA_HU_LS,
187 TILE_OPC_MNZH_SN, 187 TILEGX_OPC_MULA_HU_LU,
188 TILE_OPC_MTSPR, 188 TILEGX_OPC_MULA_LS_LS,
189 TILE_OPC_MULHH_SS, 189 TILEGX_OPC_MULA_LS_LU,
190 TILE_OPC_MULHH_SS_SN, 190 TILEGX_OPC_MULA_LU_LU,
191 TILE_OPC_MULHH_SU, 191 TILEGX_OPC_MULAX,
192 TILE_OPC_MULHH_SU_SN, 192 TILEGX_OPC_MULX,
193 TILE_OPC_MULHH_UU, 193 TILEGX_OPC_MZ,
194 TILE_OPC_MULHH_UU_SN, 194 TILEGX_OPC_NAP,
195 TILE_OPC_MULHHA_SS, 195 TILEGX_OPC_NOP,
196 TILE_OPC_MULHHA_SS_SN, 196 TILEGX_OPC_NOR,
197 TILE_OPC_MULHHA_SU, 197 TILEGX_OPC_OR,
198 TILE_OPC_MULHHA_SU_SN, 198 TILEGX_OPC_ORI,
199 TILE_OPC_MULHHA_UU, 199 TILEGX_OPC_PCNT,
200 TILE_OPC_MULHHA_UU_SN, 200 TILEGX_OPC_REVBITS,
201 TILE_OPC_MULHHSA_UU, 201 TILEGX_OPC_REVBYTES,
202 TILE_OPC_MULHHSA_UU_SN, 202 TILEGX_OPC_ROTL,
203 TILE_OPC_MULHL_SS, 203 TILEGX_OPC_ROTLI,
204 TILE_OPC_MULHL_SS_SN, 204 TILEGX_OPC_SHL,
205 TILE_OPC_MULHL_SU, 205 TILEGX_OPC_SHL16INSLI,
206 TILE_OPC_MULHL_SU_SN, 206 TILEGX_OPC_SHL1ADD,
207 TILE_OPC_MULHL_US, 207 TILEGX_OPC_SHL1ADDX,
208 TILE_OPC_MULHL_US_SN, 208 TILEGX_OPC_SHL2ADD,
209 TILE_OPC_MULHL_UU, 209 TILEGX_OPC_SHL2ADDX,
210 TILE_OPC_MULHL_UU_SN, 210 TILEGX_OPC_SHL3ADD,
211 TILE_OPC_MULHLA_SS, 211 TILEGX_OPC_SHL3ADDX,
212 TILE_OPC_MULHLA_SS_SN, 212 TILEGX_OPC_SHLI,
213 TILE_OPC_MULHLA_SU, 213 TILEGX_OPC_SHLX,
214 TILE_OPC_MULHLA_SU_SN, 214 TILEGX_OPC_SHLXI,
215 TILE_OPC_MULHLA_US, 215 TILEGX_OPC_SHRS,
216 TILE_OPC_MULHLA_US_SN, 216 TILEGX_OPC_SHRSI,
217 TILE_OPC_MULHLA_UU, 217 TILEGX_OPC_SHRU,
218 TILE_OPC_MULHLA_UU_SN, 218 TILEGX_OPC_SHRUI,
219 TILE_OPC_MULHLSA_UU, 219 TILEGX_OPC_SHRUX,
220 TILE_OPC_MULHLSA_UU_SN, 220 TILEGX_OPC_SHRUXI,
221 TILE_OPC_MULLL_SS, 221 TILEGX_OPC_SHUFFLEBYTES,
222 TILE_OPC_MULLL_SS_SN, 222 TILEGX_OPC_ST,
223 TILE_OPC_MULLL_SU, 223 TILEGX_OPC_ST1,
224 TILE_OPC_MULLL_SU_SN, 224 TILEGX_OPC_ST1_ADD,
225 TILE_OPC_MULLL_UU, 225 TILEGX_OPC_ST2,
226 TILE_OPC_MULLL_UU_SN, 226 TILEGX_OPC_ST2_ADD,
227 TILE_OPC_MULLLA_SS, 227 TILEGX_OPC_ST4,
228 TILE_OPC_MULLLA_SS_SN, 228 TILEGX_OPC_ST4_ADD,
229 TILE_OPC_MULLLA_SU, 229 TILEGX_OPC_ST_ADD,
230 TILE_OPC_MULLLA_SU_SN, 230 TILEGX_OPC_STNT,
231 TILE_OPC_MULLLA_UU, 231 TILEGX_OPC_STNT1,
232 TILE_OPC_MULLLA_UU_SN, 232 TILEGX_OPC_STNT1_ADD,
233 TILE_OPC_MULLLSA_UU, 233 TILEGX_OPC_STNT2,
234 TILE_OPC_MULLLSA_UU_SN, 234 TILEGX_OPC_STNT2_ADD,
235 TILE_OPC_MVNZ, 235 TILEGX_OPC_STNT4,
236 TILE_OPC_MVNZ_SN, 236 TILEGX_OPC_STNT4_ADD,
237 TILE_OPC_MVZ, 237 TILEGX_OPC_STNT_ADD,
238 TILE_OPC_MVZ_SN, 238 TILEGX_OPC_SUB,
239 TILE_OPC_MZ, 239 TILEGX_OPC_SUBX,
240 TILE_OPC_MZ_SN, 240 TILEGX_OPC_SUBXSC,
241 TILE_OPC_MZB, 241 TILEGX_OPC_SWINT0,
242 TILE_OPC_MZB_SN, 242 TILEGX_OPC_SWINT1,
243 TILE_OPC_MZH, 243 TILEGX_OPC_SWINT2,
244 TILE_OPC_MZH_SN, 244 TILEGX_OPC_SWINT3,
245 TILE_OPC_NAP, 245 TILEGX_OPC_TBLIDXB0,
246 TILE_OPC_NOP, 246 TILEGX_OPC_TBLIDXB1,
247 TILE_OPC_NOR, 247 TILEGX_OPC_TBLIDXB2,
248 TILE_OPC_NOR_SN, 248 TILEGX_OPC_TBLIDXB3,
249 TILE_OPC_OR, 249 TILEGX_OPC_V1ADD,
250 TILE_OPC_OR_SN, 250 TILEGX_OPC_V1ADDI,
251 TILE_OPC_ORI, 251 TILEGX_OPC_V1ADDUC,
252 TILE_OPC_ORI_SN, 252 TILEGX_OPC_V1ADIFFU,
253 TILE_OPC_PACKBS_U, 253 TILEGX_OPC_V1AVGU,
254 TILE_OPC_PACKBS_U_SN, 254 TILEGX_OPC_V1CMPEQ,
255 TILE_OPC_PACKHB, 255 TILEGX_OPC_V1CMPEQI,
256 TILE_OPC_PACKHB_SN, 256 TILEGX_OPC_V1CMPLES,
257 TILE_OPC_PACKHS, 257 TILEGX_OPC_V1CMPLEU,
258 TILE_OPC_PACKHS_SN, 258 TILEGX_OPC_V1CMPLTS,
259 TILE_OPC_PACKLB, 259 TILEGX_OPC_V1CMPLTSI,
260 TILE_OPC_PACKLB_SN, 260 TILEGX_OPC_V1CMPLTU,
261 TILE_OPC_PCNT, 261 TILEGX_OPC_V1CMPLTUI,
262 TILE_OPC_PCNT_SN, 262 TILEGX_OPC_V1CMPNE,
263 TILE_OPC_RL, 263 TILEGX_OPC_V1DDOTPU,
264 TILE_OPC_RL_SN, 264 TILEGX_OPC_V1DDOTPUA,
265 TILE_OPC_RLI, 265 TILEGX_OPC_V1DDOTPUS,
266 TILE_OPC_RLI_SN, 266 TILEGX_OPC_V1DDOTPUSA,
267 TILE_OPC_S1A, 267 TILEGX_OPC_V1DOTP,
268 TILE_OPC_S1A_SN, 268 TILEGX_OPC_V1DOTPA,
269 TILE_OPC_S2A, 269 TILEGX_OPC_V1DOTPU,
270 TILE_OPC_S2A_SN, 270 TILEGX_OPC_V1DOTPUA,
271 TILE_OPC_S3A, 271 TILEGX_OPC_V1DOTPUS,
272 TILE_OPC_S3A_SN, 272 TILEGX_OPC_V1DOTPUSA,
273 TILE_OPC_SADAB_U, 273 TILEGX_OPC_V1INT_H,
274 TILE_OPC_SADAB_U_SN, 274 TILEGX_OPC_V1INT_L,
275 TILE_OPC_SADAH, 275 TILEGX_OPC_V1MAXU,
276 TILE_OPC_SADAH_SN, 276 TILEGX_OPC_V1MAXUI,
277 TILE_OPC_SADAH_U, 277 TILEGX_OPC_V1MINU,
278 TILE_OPC_SADAH_U_SN, 278 TILEGX_OPC_V1MINUI,
279 TILE_OPC_SADB_U, 279 TILEGX_OPC_V1MNZ,
280 TILE_OPC_SADB_U_SN, 280 TILEGX_OPC_V1MULTU,
281 TILE_OPC_SADH, 281 TILEGX_OPC_V1MULU,
282 TILE_OPC_SADH_SN, 282 TILEGX_OPC_V1MULUS,
283 TILE_OPC_SADH_U, 283 TILEGX_OPC_V1MZ,
284 TILE_OPC_SADH_U_SN, 284 TILEGX_OPC_V1SADAU,
285 TILE_OPC_SB, 285 TILEGX_OPC_V1SADU,
286 TILE_OPC_SBADD, 286 TILEGX_OPC_V1SHL,
287 TILE_OPC_SEQ, 287 TILEGX_OPC_V1SHLI,
288 TILE_OPC_SEQ_SN, 288 TILEGX_OPC_V1SHRS,
289 TILE_OPC_SEQB, 289 TILEGX_OPC_V1SHRSI,
290 TILE_OPC_SEQB_SN, 290 TILEGX_OPC_V1SHRU,
291 TILE_OPC_SEQH, 291 TILEGX_OPC_V1SHRUI,
292 TILE_OPC_SEQH_SN, 292 TILEGX_OPC_V1SUB,
293 TILE_OPC_SEQI, 293 TILEGX_OPC_V1SUBUC,
294 TILE_OPC_SEQI_SN, 294 TILEGX_OPC_V2ADD,
295 TILE_OPC_SEQIB, 295 TILEGX_OPC_V2ADDI,
296 TILE_OPC_SEQIB_SN, 296 TILEGX_OPC_V2ADDSC,
297 TILE_OPC_SEQIH, 297 TILEGX_OPC_V2ADIFFS,
298 TILE_OPC_SEQIH_SN, 298 TILEGX_OPC_V2AVGS,
299 TILE_OPC_SH, 299 TILEGX_OPC_V2CMPEQ,
300 TILE_OPC_SHADD, 300 TILEGX_OPC_V2CMPEQI,
301 TILE_OPC_SHL, 301 TILEGX_OPC_V2CMPLES,
302 TILE_OPC_SHL_SN, 302 TILEGX_OPC_V2CMPLEU,
303 TILE_OPC_SHLB, 303 TILEGX_OPC_V2CMPLTS,
304 TILE_OPC_SHLB_SN, 304 TILEGX_OPC_V2CMPLTSI,
305 TILE_OPC_SHLH, 305 TILEGX_OPC_V2CMPLTU,
306 TILE_OPC_SHLH_SN, 306 TILEGX_OPC_V2CMPLTUI,
307 TILE_OPC_SHLI, 307 TILEGX_OPC_V2CMPNE,
308 TILE_OPC_SHLI_SN, 308 TILEGX_OPC_V2DOTP,
309 TILE_OPC_SHLIB, 309 TILEGX_OPC_V2DOTPA,
310 TILE_OPC_SHLIB_SN, 310 TILEGX_OPC_V2INT_H,
311 TILE_OPC_SHLIH, 311 TILEGX_OPC_V2INT_L,
312 TILE_OPC_SHLIH_SN, 312 TILEGX_OPC_V2MAXS,
313 TILE_OPC_SHR, 313 TILEGX_OPC_V2MAXSI,
314 TILE_OPC_SHR_SN, 314 TILEGX_OPC_V2MINS,
315 TILE_OPC_SHRB, 315 TILEGX_OPC_V2MINSI,
316 TILE_OPC_SHRB_SN, 316 TILEGX_OPC_V2MNZ,
317 TILE_OPC_SHRH, 317 TILEGX_OPC_V2MULFSC,
318 TILE_OPC_SHRH_SN, 318 TILEGX_OPC_V2MULS,
319 TILE_OPC_SHRI, 319 TILEGX_OPC_V2MULTS,
320 TILE_OPC_SHRI_SN, 320 TILEGX_OPC_V2MZ,
321 TILE_OPC_SHRIB, 321 TILEGX_OPC_V2PACKH,
322 TILE_OPC_SHRIB_SN, 322 TILEGX_OPC_V2PACKL,
323 TILE_OPC_SHRIH, 323 TILEGX_OPC_V2PACKUC,
324 TILE_OPC_SHRIH_SN, 324 TILEGX_OPC_V2SADAS,
325 TILE_OPC_SLT, 325 TILEGX_OPC_V2SADAU,
326 TILE_OPC_SLT_SN, 326 TILEGX_OPC_V2SADS,
327 TILE_OPC_SLT_U, 327 TILEGX_OPC_V2SADU,
328 TILE_OPC_SLT_U_SN, 328 TILEGX_OPC_V2SHL,
329 TILE_OPC_SLTB, 329 TILEGX_OPC_V2SHLI,
330 TILE_OPC_SLTB_SN, 330 TILEGX_OPC_V2SHLSC,
331 TILE_OPC_SLTB_U, 331 TILEGX_OPC_V2SHRS,
332 TILE_OPC_SLTB_U_SN, 332 TILEGX_OPC_V2SHRSI,
333 TILE_OPC_SLTE, 333 TILEGX_OPC_V2SHRU,
334 TILE_OPC_SLTE_SN, 334 TILEGX_OPC_V2SHRUI,
335 TILE_OPC_SLTE_U, 335 TILEGX_OPC_V2SUB,
336 TILE_OPC_SLTE_U_SN, 336 TILEGX_OPC_V2SUBSC,
337 TILE_OPC_SLTEB, 337 TILEGX_OPC_V4ADD,
338 TILE_OPC_SLTEB_SN, 338 TILEGX_OPC_V4ADDSC,
339 TILE_OPC_SLTEB_U, 339 TILEGX_OPC_V4INT_H,
340 TILE_OPC_SLTEB_U_SN, 340 TILEGX_OPC_V4INT_L,
341 TILE_OPC_SLTEH, 341 TILEGX_OPC_V4PACKSC,
342 TILE_OPC_SLTEH_SN, 342 TILEGX_OPC_V4SHL,
343 TILE_OPC_SLTEH_U, 343 TILEGX_OPC_V4SHLSC,
344 TILE_OPC_SLTEH_U_SN, 344 TILEGX_OPC_V4SHRS,
345 TILE_OPC_SLTH, 345 TILEGX_OPC_V4SHRU,
346 TILE_OPC_SLTH_SN, 346 TILEGX_OPC_V4SUB,
347 TILE_OPC_SLTH_U, 347 TILEGX_OPC_V4SUBSC,
348 TILE_OPC_SLTH_U_SN, 348 TILEGX_OPC_WH64,
349 TILE_OPC_SLTI, 349 TILEGX_OPC_XOR,
350 TILE_OPC_SLTI_SN, 350 TILEGX_OPC_XORI,
351 TILE_OPC_SLTI_U, 351 TILEGX_OPC_NONE
352 TILE_OPC_SLTI_U_SN, 352} tilegx_mnemonic;
353 TILE_OPC_SLTIB,
354 TILE_OPC_SLTIB_SN,
355 TILE_OPC_SLTIB_U,
356 TILE_OPC_SLTIB_U_SN,
357 TILE_OPC_SLTIH,
358 TILE_OPC_SLTIH_SN,
359 TILE_OPC_SLTIH_U,
360 TILE_OPC_SLTIH_U_SN,
361 TILE_OPC_SNE,
362 TILE_OPC_SNE_SN,
363 TILE_OPC_SNEB,
364 TILE_OPC_SNEB_SN,
365 TILE_OPC_SNEH,
366 TILE_OPC_SNEH_SN,
367 TILE_OPC_SRA,
368 TILE_OPC_SRA_SN,
369 TILE_OPC_SRAB,
370 TILE_OPC_SRAB_SN,
371 TILE_OPC_SRAH,
372 TILE_OPC_SRAH_SN,
373 TILE_OPC_SRAI,
374 TILE_OPC_SRAI_SN,
375 TILE_OPC_SRAIB,
376 TILE_OPC_SRAIB_SN,
377 TILE_OPC_SRAIH,
378 TILE_OPC_SRAIH_SN,
379 TILE_OPC_SUB,
380 TILE_OPC_SUB_SN,
381 TILE_OPC_SUBB,
382 TILE_OPC_SUBB_SN,
383 TILE_OPC_SUBBS_U,
384 TILE_OPC_SUBBS_U_SN,
385 TILE_OPC_SUBH,
386 TILE_OPC_SUBH_SN,
387 TILE_OPC_SUBHS,
388 TILE_OPC_SUBHS_SN,
389 TILE_OPC_SUBS,
390 TILE_OPC_SUBS_SN,
391 TILE_OPC_SW,
392 TILE_OPC_SWADD,
393 TILE_OPC_SWINT0,
394 TILE_OPC_SWINT1,
395 TILE_OPC_SWINT2,
396 TILE_OPC_SWINT3,
397 TILE_OPC_TBLIDXB0,
398 TILE_OPC_TBLIDXB0_SN,
399 TILE_OPC_TBLIDXB1,
400 TILE_OPC_TBLIDXB1_SN,
401 TILE_OPC_TBLIDXB2,
402 TILE_OPC_TBLIDXB2_SN,
403 TILE_OPC_TBLIDXB3,
404 TILE_OPC_TBLIDXB3_SN,
405 TILE_OPC_TNS,
406 TILE_OPC_TNS_SN,
407 TILE_OPC_WH64,
408 TILE_OPC_XOR,
409 TILE_OPC_XOR_SN,
410 TILE_OPC_XORI,
411 TILE_OPC_XORI_SN,
412 TILE_OPC_NONE
413} tile_mnemonic;
414 353
415/* 64-bit pattern for a { bpt ; nop } bundle. */ 354/* 64-bit pattern for a { bpt ; nop } bundle. */
416#define TILE_BPT_BUNDLE 0x400b3cae70166000ULL 355#define TILEGX_BPT_BUNDLE 0x286a44ae51485000ULL
417 356
418 357
419#define TILE_ELF_MACHINE_CODE EM_TILEPRO 358#define TILE_ELF_MACHINE_CODE EM_TILE64
420 359
421#define TILE_ELF_NAME "elf32-tilepro" 360#define TILE_ELF_NAME "elf32-tile64"
422 361
423 362
424static __inline unsigned int 363static __inline unsigned int
425get_BrOff_SN(tile_bundle_bits num) 364get_BFEnd_X0(tilegx_bundle_bits num)
426{ 365{
427 const unsigned int n = (unsigned int)num; 366 const unsigned int n = (unsigned int)num;
428 return (((n >> 0)) & 0x3ff); 367 return (((n >> 12)) & 0x3f);
429} 368}
430 369
431static __inline unsigned int 370static __inline unsigned int
432get_BrOff_X1(tile_bundle_bits n) 371get_BFOpcodeExtension_X0(tilegx_bundle_bits num)
433{ 372{
434 return (((unsigned int)(n >> 43)) & 0x00007fff) | 373 const unsigned int n = (unsigned int)num;
435 (((unsigned int)(n >> 20)) & 0x00018000); 374 return (((n >> 24)) & 0xf);
436} 375}
437 376
438static __inline unsigned int 377static __inline unsigned int
439get_BrType_X1(tile_bundle_bits n) 378get_BFStart_X0(tilegx_bundle_bits num)
440{ 379{
441 return (((unsigned int)(n >> 31)) & 0xf); 380 const unsigned int n = (unsigned int)num;
381 return (((n >> 18)) & 0x3f);
442} 382}
443 383
444static __inline unsigned int 384static __inline unsigned int
445get_Dest_Imm8_X1(tile_bundle_bits n) 385get_BrOff_X1(tilegx_bundle_bits n)
446{ 386{
447 return (((unsigned int)(n >> 31)) & 0x0000003f) | 387 return (((unsigned int)(n >> 31)) & 0x0000003f) |
448 (((unsigned int)(n >> 43)) & 0x000000c0); 388 (((unsigned int)(n >> 37)) & 0x0001ffc0);
449} 389}
450 390
451static __inline unsigned int 391static __inline unsigned int
452get_Dest_SN(tile_bundle_bits num) 392get_BrType_X1(tilegx_bundle_bits n)
453{ 393{
454 const unsigned int n = (unsigned int)num; 394 return (((unsigned int)(n >> 54)) & 0x1f);
455 return (((n >> 2)) & 0x3);
456} 395}
457 396
458static __inline unsigned int 397static __inline unsigned int
459get_Dest_X0(tile_bundle_bits num) 398get_Dest_Imm8_X1(tilegx_bundle_bits n)
399{
400 return (((unsigned int)(n >> 31)) & 0x0000003f) |
401 (((unsigned int)(n >> 43)) & 0x000000c0);
402}
403
404static __inline unsigned int
405get_Dest_X0(tilegx_bundle_bits num)
460{ 406{
461 const unsigned int n = (unsigned int)num; 407 const unsigned int n = (unsigned int)num;
462 return (((n >> 0)) & 0x3f); 408 return (((n >> 0)) & 0x3f);
463} 409}
464 410
465static __inline unsigned int 411static __inline unsigned int
466get_Dest_X1(tile_bundle_bits n) 412get_Dest_X1(tilegx_bundle_bits n)
467{ 413{
468 return (((unsigned int)(n >> 31)) & 0x3f); 414 return (((unsigned int)(n >> 31)) & 0x3f);
469} 415}
470 416
471static __inline unsigned int 417static __inline unsigned int
472get_Dest_Y0(tile_bundle_bits num) 418get_Dest_Y0(tilegx_bundle_bits num)
473{ 419{
474 const unsigned int n = (unsigned int)num; 420 const unsigned int n = (unsigned int)num;
475 return (((n >> 0)) & 0x3f); 421 return (((n >> 0)) & 0x3f);
476} 422}
477 423
478static __inline unsigned int 424static __inline unsigned int
479get_Dest_Y1(tile_bundle_bits n) 425get_Dest_Y1(tilegx_bundle_bits n)
480{ 426{
481 return (((unsigned int)(n >> 31)) & 0x3f); 427 return (((unsigned int)(n >> 31)) & 0x3f);
482} 428}
483 429
484static __inline unsigned int 430static __inline unsigned int
485get_Imm16_X0(tile_bundle_bits num) 431get_Imm16_X0(tilegx_bundle_bits num)
486{ 432{
487 const unsigned int n = (unsigned int)num; 433 const unsigned int n = (unsigned int)num;
488 return (((n >> 12)) & 0xffff); 434 return (((n >> 12)) & 0xffff);
489} 435}
490 436
491static __inline unsigned int 437static __inline unsigned int
492get_Imm16_X1(tile_bundle_bits n) 438get_Imm16_X1(tilegx_bundle_bits n)
493{ 439{
494 return (((unsigned int)(n >> 43)) & 0xffff); 440 return (((unsigned int)(n >> 43)) & 0xffff);
495} 441}
496 442
497static __inline unsigned int 443static __inline unsigned int
498get_Imm8_SN(tile_bundle_bits num) 444get_Imm8OpcodeExtension_X0(tilegx_bundle_bits num)
499{
500 const unsigned int n = (unsigned int)num;
501 return (((n >> 0)) & 0xff);
502}
503
504static __inline unsigned int
505get_Imm8_X0(tile_bundle_bits num)
506{ 445{
507 const unsigned int n = (unsigned int)num; 446 const unsigned int n = (unsigned int)num;
508 return (((n >> 12)) & 0xff); 447 return (((n >> 20)) & 0xff);
509} 448}
510 449
511static __inline unsigned int 450static __inline unsigned int
512get_Imm8_X1(tile_bundle_bits n) 451get_Imm8OpcodeExtension_X1(tilegx_bundle_bits n)
513{ 452{
514 return (((unsigned int)(n >> 43)) & 0xff); 453 return (((unsigned int)(n >> 51)) & 0xff);
515} 454}
516 455
517static __inline unsigned int 456static __inline unsigned int
518get_Imm8_Y0(tile_bundle_bits num) 457get_Imm8_X0(tilegx_bundle_bits num)
519{ 458{
520 const unsigned int n = (unsigned int)num; 459 const unsigned int n = (unsigned int)num;
521 return (((n >> 12)) & 0xff); 460 return (((n >> 12)) & 0xff);
522} 461}
523 462
524static __inline unsigned int 463static __inline unsigned int
525get_Imm8_Y1(tile_bundle_bits n) 464get_Imm8_X1(tilegx_bundle_bits n)
526{ 465{
527 return (((unsigned int)(n >> 43)) & 0xff); 466 return (((unsigned int)(n >> 43)) & 0xff);
528} 467}
529 468
530static __inline unsigned int 469static __inline unsigned int
531get_ImmOpcodeExtension_X0(tile_bundle_bits num) 470get_Imm8_Y0(tilegx_bundle_bits num)
532{
533 const unsigned int n = (unsigned int)num;
534 return (((n >> 20)) & 0x7f);
535}
536
537static __inline unsigned int
538get_ImmOpcodeExtension_X1(tile_bundle_bits n)
539{
540 return (((unsigned int)(n >> 51)) & 0x7f);
541}
542
543static __inline unsigned int
544get_ImmRROpcodeExtension_SN(tile_bundle_bits num)
545{ 471{
546 const unsigned int n = (unsigned int)num; 472 const unsigned int n = (unsigned int)num;
547 return (((n >> 8)) & 0x3); 473 return (((n >> 12)) & 0xff);
548}
549
550static __inline unsigned int
551get_JOffLong_X1(tile_bundle_bits n)
552{
553 return (((unsigned int)(n >> 43)) & 0x00007fff) |
554 (((unsigned int)(n >> 20)) & 0x00018000) |
555 (((unsigned int)(n >> 14)) & 0x001e0000) |
556 (((unsigned int)(n >> 16)) & 0x07e00000) |
557 (((unsigned int)(n >> 31)) & 0x18000000);
558}
559
560static __inline unsigned int
561get_JOff_X1(tile_bundle_bits n)
562{
563 return (((unsigned int)(n >> 43)) & 0x00007fff) |
564 (((unsigned int)(n >> 20)) & 0x00018000) |
565 (((unsigned int)(n >> 14)) & 0x001e0000) |
566 (((unsigned int)(n >> 16)) & 0x07e00000) |
567 (((unsigned int)(n >> 31)) & 0x08000000);
568}
569
570static __inline unsigned int
571get_MF_Imm15_X1(tile_bundle_bits n)
572{
573 return (((unsigned int)(n >> 37)) & 0x00003fff) |
574 (((unsigned int)(n >> 44)) & 0x00004000);
575} 474}
576 475
577static __inline unsigned int 476static __inline unsigned int
578get_MMEnd_X0(tile_bundle_bits num) 477get_Imm8_Y1(tilegx_bundle_bits n)
579{ 478{
580 const unsigned int n = (unsigned int)num; 479 return (((unsigned int)(n >> 43)) & 0xff);
581 return (((n >> 18)) & 0x1f);
582} 480}
583 481
584static __inline unsigned int 482static __inline unsigned int
585get_MMEnd_X1(tile_bundle_bits n) 483get_JumpOff_X1(tilegx_bundle_bits n)
586{ 484{
587 return (((unsigned int)(n >> 49)) & 0x1f); 485 return (((unsigned int)(n >> 31)) & 0x7ffffff);
588} 486}
589 487
590static __inline unsigned int 488static __inline unsigned int
591get_MMStart_X0(tile_bundle_bits num) 489get_JumpOpcodeExtension_X1(tilegx_bundle_bits n)
592{ 490{
593 const unsigned int n = (unsigned int)num; 491 return (((unsigned int)(n >> 58)) & 0x1);
594 return (((n >> 23)) & 0x1f);
595} 492}
596 493
597static __inline unsigned int 494static __inline unsigned int
598get_MMStart_X1(tile_bundle_bits n) 495get_MF_Imm14_X1(tilegx_bundle_bits n)
599{ 496{
600 return (((unsigned int)(n >> 54)) & 0x1f); 497 return (((unsigned int)(n >> 37)) & 0x3fff);
601} 498}
602 499
603static __inline unsigned int 500static __inline unsigned int
604get_MT_Imm15_X1(tile_bundle_bits n) 501get_MT_Imm14_X1(tilegx_bundle_bits n)
605{ 502{
606 return (((unsigned int)(n >> 31)) & 0x0000003f) | 503 return (((unsigned int)(n >> 31)) & 0x0000003f) |
607 (((unsigned int)(n >> 37)) & 0x00003fc0) | 504 (((unsigned int)(n >> 37)) & 0x00003fc0);
608 (((unsigned int)(n >> 44)) & 0x00004000);
609} 505}
610 506
611static __inline unsigned int 507static __inline unsigned int
612get_Mode(tile_bundle_bits n) 508get_Mode(tilegx_bundle_bits n)
613{ 509{
614 return (((unsigned int)(n >> 63)) & 0x1); 510 return (((unsigned int)(n >> 62)) & 0x3);
615} 511}
616 512
617static __inline unsigned int 513static __inline unsigned int
618get_NoRegOpcodeExtension_SN(tile_bundle_bits num) 514get_Opcode_X0(tilegx_bundle_bits num)
619{
620 const unsigned int n = (unsigned int)num;
621 return (((n >> 0)) & 0xf);
622}
623
624static __inline unsigned int
625get_Opcode_SN(tile_bundle_bits num)
626{
627 const unsigned int n = (unsigned int)num;
628 return (((n >> 10)) & 0x3f);
629}
630
631static __inline unsigned int
632get_Opcode_X0(tile_bundle_bits num)
633{ 515{
634 const unsigned int n = (unsigned int)num; 516 const unsigned int n = (unsigned int)num;
635 return (((n >> 28)) & 0x7); 517 return (((n >> 28)) & 0x7);
636} 518}
637 519
638static __inline unsigned int 520static __inline unsigned int
639get_Opcode_X1(tile_bundle_bits n) 521get_Opcode_X1(tilegx_bundle_bits n)
640{ 522{
641 return (((unsigned int)(n >> 59)) & 0xf); 523 return (((unsigned int)(n >> 59)) & 0x7);
642} 524}
643 525
644static __inline unsigned int 526static __inline unsigned int
645get_Opcode_Y0(tile_bundle_bits num) 527get_Opcode_Y0(tilegx_bundle_bits num)
646{ 528{
647 const unsigned int n = (unsigned int)num; 529 const unsigned int n = (unsigned int)num;
648 return (((n >> 27)) & 0xf); 530 return (((n >> 27)) & 0xf);
649} 531}
650 532
651static __inline unsigned int 533static __inline unsigned int
652get_Opcode_Y1(tile_bundle_bits n) 534get_Opcode_Y1(tilegx_bundle_bits n)
653{ 535{
654 return (((unsigned int)(n >> 59)) & 0xf); 536 return (((unsigned int)(n >> 58)) & 0xf);
655} 537}
656 538
657static __inline unsigned int 539static __inline unsigned int
658get_Opcode_Y2(tile_bundle_bits n) 540get_Opcode_Y2(tilegx_bundle_bits n)
659{ 541{
660 return (((unsigned int)(n >> 56)) & 0x7); 542 return (((n >> 26)) & 0x00000001) |
661} 543 (((unsigned int)(n >> 56)) & 0x00000002);
662
663static __inline unsigned int
664get_RROpcodeExtension_SN(tile_bundle_bits num)
665{
666 const unsigned int n = (unsigned int)num;
667 return (((n >> 4)) & 0xf);
668} 544}
669 545
670static __inline unsigned int 546static __inline unsigned int
671get_RRROpcodeExtension_X0(tile_bundle_bits num) 547get_RRROpcodeExtension_X0(tilegx_bundle_bits num)
672{ 548{
673 const unsigned int n = (unsigned int)num; 549 const unsigned int n = (unsigned int)num;
674 return (((n >> 18)) & 0x1ff); 550 return (((n >> 18)) & 0x3ff);
675} 551}
676 552
677static __inline unsigned int 553static __inline unsigned int
678get_RRROpcodeExtension_X1(tile_bundle_bits n) 554get_RRROpcodeExtension_X1(tilegx_bundle_bits n)
679{ 555{
680 return (((unsigned int)(n >> 49)) & 0x1ff); 556 return (((unsigned int)(n >> 49)) & 0x3ff);
681} 557}
682 558
683static __inline unsigned int 559static __inline unsigned int
684get_RRROpcodeExtension_Y0(tile_bundle_bits num) 560get_RRROpcodeExtension_Y0(tilegx_bundle_bits num)
685{ 561{
686 const unsigned int n = (unsigned int)num; 562 const unsigned int n = (unsigned int)num;
687 return (((n >> 18)) & 0x3); 563 return (((n >> 18)) & 0x3);
688} 564}
689 565
690static __inline unsigned int 566static __inline unsigned int
691get_RRROpcodeExtension_Y1(tile_bundle_bits n) 567get_RRROpcodeExtension_Y1(tilegx_bundle_bits n)
692{ 568{
693 return (((unsigned int)(n >> 49)) & 0x3); 569 return (((unsigned int)(n >> 49)) & 0x3);
694} 570}
695 571
696static __inline unsigned int 572static __inline unsigned int
697get_RouteOpcodeExtension_SN(tile_bundle_bits num) 573get_ShAmt_X0(tilegx_bundle_bits num)
698{
699 const unsigned int n = (unsigned int)num;
700 return (((n >> 0)) & 0x3ff);
701}
702
703static __inline unsigned int
704get_S_X0(tile_bundle_bits num)
705{ 574{
706 const unsigned int n = (unsigned int)num; 575 const unsigned int n = (unsigned int)num;
707 return (((n >> 27)) & 0x1); 576 return (((n >> 12)) & 0x3f);
708} 577}
709 578
710static __inline unsigned int 579static __inline unsigned int
711get_S_X1(tile_bundle_bits n) 580get_ShAmt_X1(tilegx_bundle_bits n)
712{ 581{
713 return (((unsigned int)(n >> 58)) & 0x1); 582 return (((unsigned int)(n >> 43)) & 0x3f);
714} 583}
715 584
716static __inline unsigned int 585static __inline unsigned int
717get_ShAmt_X0(tile_bundle_bits num) 586get_ShAmt_Y0(tilegx_bundle_bits num)
718{ 587{
719 const unsigned int n = (unsigned int)num; 588 const unsigned int n = (unsigned int)num;
720 return (((n >> 12)) & 0x1f); 589 return (((n >> 12)) & 0x3f);
721} 590}
722 591
723static __inline unsigned int 592static __inline unsigned int
724get_ShAmt_X1(tile_bundle_bits n) 593get_ShAmt_Y1(tilegx_bundle_bits n)
725{ 594{
726 return (((unsigned int)(n >> 43)) & 0x1f); 595 return (((unsigned int)(n >> 43)) & 0x3f);
727} 596}
728 597
729static __inline unsigned int 598static __inline unsigned int
730get_ShAmt_Y0(tile_bundle_bits num) 599get_ShiftOpcodeExtension_X0(tilegx_bundle_bits num)
731{ 600{
732 const unsigned int n = (unsigned int)num; 601 const unsigned int n = (unsigned int)num;
733 return (((n >> 12)) & 0x1f); 602 return (((n >> 18)) & 0x3ff);
734} 603}
735 604
736static __inline unsigned int 605static __inline unsigned int
737get_ShAmt_Y1(tile_bundle_bits n) 606get_ShiftOpcodeExtension_X1(tilegx_bundle_bits n)
738{ 607{
739 return (((unsigned int)(n >> 43)) & 0x1f); 608 return (((unsigned int)(n >> 49)) & 0x3ff);
740} 609}
741 610
742static __inline unsigned int 611static __inline unsigned int
743get_SrcA_X0(tile_bundle_bits num) 612get_ShiftOpcodeExtension_Y0(tilegx_bundle_bits num)
744{ 613{
745 const unsigned int n = (unsigned int)num; 614 const unsigned int n = (unsigned int)num;
746 return (((n >> 6)) & 0x3f); 615 return (((n >> 18)) & 0x3);
747} 616}
748 617
749static __inline unsigned int 618static __inline unsigned int
750get_SrcA_X1(tile_bundle_bits n) 619get_ShiftOpcodeExtension_Y1(tilegx_bundle_bits n)
751{ 620{
752 return (((unsigned int)(n >> 37)) & 0x3f); 621 return (((unsigned int)(n >> 49)) & 0x3);
753} 622}
754 623
755static __inline unsigned int 624static __inline unsigned int
756get_SrcA_Y0(tile_bundle_bits num) 625get_SrcA_X0(tilegx_bundle_bits num)
757{ 626{
758 const unsigned int n = (unsigned int)num; 627 const unsigned int n = (unsigned int)num;
759 return (((n >> 6)) & 0x3f); 628 return (((n >> 6)) & 0x3f);
760} 629}
761 630
762static __inline unsigned int 631static __inline unsigned int
763get_SrcA_Y1(tile_bundle_bits n) 632get_SrcA_X1(tilegx_bundle_bits n)
764{ 633{
765 return (((unsigned int)(n >> 37)) & 0x3f); 634 return (((unsigned int)(n >> 37)) & 0x3f);
766} 635}
767 636
768static __inline unsigned int 637static __inline unsigned int
769get_SrcA_Y2(tile_bundle_bits n) 638get_SrcA_Y0(tilegx_bundle_bits num)
770{ 639{
771 return (((n >> 26)) & 0x00000001) | 640 const unsigned int n = (unsigned int)num;
772 (((unsigned int)(n >> 50)) & 0x0000003e); 641 return (((n >> 6)) & 0x3f);
773} 642}
774 643
775static __inline unsigned int 644static __inline unsigned int
776get_SrcBDest_Y2(tile_bundle_bits num) 645get_SrcA_Y1(tilegx_bundle_bits n)
777{ 646{
778 const unsigned int n = (unsigned int)num; 647 return (((unsigned int)(n >> 37)) & 0x3f);
779 return (((n >> 20)) & 0x3f);
780} 648}
781 649
782static __inline unsigned int 650static __inline unsigned int
783get_SrcB_X0(tile_bundle_bits num) 651get_SrcA_Y2(tilegx_bundle_bits num)
784{ 652{
785 const unsigned int n = (unsigned int)num; 653 const unsigned int n = (unsigned int)num;
786 return (((n >> 12)) & 0x3f); 654 return (((n >> 20)) & 0x3f);
787} 655}
788 656
789static __inline unsigned int 657static __inline unsigned int
790get_SrcB_X1(tile_bundle_bits n) 658get_SrcBDest_Y2(tilegx_bundle_bits n)
791{ 659{
792 return (((unsigned int)(n >> 43)) & 0x3f); 660 return (((unsigned int)(n >> 51)) & 0x3f);
793} 661}
794 662
795static __inline unsigned int 663static __inline unsigned int
796get_SrcB_Y0(tile_bundle_bits num) 664get_SrcB_X0(tilegx_bundle_bits num)
797{ 665{
798 const unsigned int n = (unsigned int)num; 666 const unsigned int n = (unsigned int)num;
799 return (((n >> 12)) & 0x3f); 667 return (((n >> 12)) & 0x3f);
800} 668}
801 669
802static __inline unsigned int 670static __inline unsigned int
803get_SrcB_Y1(tile_bundle_bits n) 671get_SrcB_X1(tilegx_bundle_bits n)
804{ 672{
805 return (((unsigned int)(n >> 43)) & 0x3f); 673 return (((unsigned int)(n >> 43)) & 0x3f);
806} 674}
807 675
808static __inline unsigned int 676static __inline unsigned int
809get_Src_SN(tile_bundle_bits num) 677get_SrcB_Y0(tilegx_bundle_bits num)
810{ 678{
811 const unsigned int n = (unsigned int)num; 679 const unsigned int n = (unsigned int)num;
812 return (((n >> 0)) & 0x3); 680 return (((n >> 12)) & 0x3f);
813}
814
815static __inline unsigned int
816get_UnOpcodeExtension_X0(tile_bundle_bits num)
817{
818 const unsigned int n = (unsigned int)num;
819 return (((n >> 12)) & 0x1f);
820}
821
822static __inline unsigned int
823get_UnOpcodeExtension_X1(tile_bundle_bits n)
824{
825 return (((unsigned int)(n >> 43)) & 0x1f);
826}
827
828static __inline unsigned int
829get_UnOpcodeExtension_Y0(tile_bundle_bits num)
830{
831 const unsigned int n = (unsigned int)num;
832 return (((n >> 12)) & 0x1f);
833} 681}
834 682
835static __inline unsigned int 683static __inline unsigned int
836get_UnOpcodeExtension_Y1(tile_bundle_bits n) 684get_SrcB_Y1(tilegx_bundle_bits n)
837{ 685{
838 return (((unsigned int)(n >> 43)) & 0x1f); 686 return (((unsigned int)(n >> 43)) & 0x3f);
839} 687}
840 688
841static __inline unsigned int 689static __inline unsigned int
842get_UnShOpcodeExtension_X0(tile_bundle_bits num) 690get_UnaryOpcodeExtension_X0(tilegx_bundle_bits num)
843{ 691{
844 const unsigned int n = (unsigned int)num; 692 const unsigned int n = (unsigned int)num;
845 return (((n >> 17)) & 0x3ff); 693 return (((n >> 12)) & 0x3f);
846} 694}
847 695
848static __inline unsigned int 696static __inline unsigned int
849get_UnShOpcodeExtension_X1(tile_bundle_bits n) 697get_UnaryOpcodeExtension_X1(tilegx_bundle_bits n)
850{ 698{
851 return (((unsigned int)(n >> 48)) & 0x3ff); 699 return (((unsigned int)(n >> 43)) & 0x3f);
852} 700}
853 701
854static __inline unsigned int 702static __inline unsigned int
855get_UnShOpcodeExtension_Y0(tile_bundle_bits num) 703get_UnaryOpcodeExtension_Y0(tilegx_bundle_bits num)
856{ 704{
857 const unsigned int n = (unsigned int)num; 705 const unsigned int n = (unsigned int)num;
858 return (((n >> 17)) & 0x7); 706 return (((n >> 12)) & 0x3f);
859} 707}
860 708
861static __inline unsigned int 709static __inline unsigned int
862get_UnShOpcodeExtension_Y1(tile_bundle_bits n) 710get_UnaryOpcodeExtension_Y1(tilegx_bundle_bits n)
863{ 711{
864 return (((unsigned int)(n >> 48)) & 0x7); 712 return (((unsigned int)(n >> 43)) & 0x3f);
865} 713}
866 714
867 715
@@ -874,546 +722,441 @@ sign_extend(int n, int num_bits)
874 722
875 723
876 724
877static __inline tile_bundle_bits 725static __inline tilegx_bundle_bits
878create_BrOff_SN(int num) 726create_BFEnd_X0(int num)
879{ 727{
880 const unsigned int n = (unsigned int)num; 728 const unsigned int n = (unsigned int)num;
881 return ((n & 0x3ff) << 0); 729 return ((n & 0x3f) << 12);
882} 730}
883 731
884static __inline tile_bundle_bits 732static __inline tilegx_bundle_bits
885create_BrOff_X1(int num) 733create_BFOpcodeExtension_X0(int num)
886{ 734{
887 const unsigned int n = (unsigned int)num; 735 const unsigned int n = (unsigned int)num;
888 return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | 736 return ((n & 0xf) << 24);
889 (((tile_bundle_bits)(n & 0x00018000)) << 20);
890} 737}
891 738
892static __inline tile_bundle_bits 739static __inline tilegx_bundle_bits
893create_BrType_X1(int num) 740create_BFStart_X0(int num)
894{ 741{
895 const unsigned int n = (unsigned int)num; 742 const unsigned int n = (unsigned int)num;
896 return (((tile_bundle_bits)(n & 0xf)) << 31); 743 return ((n & 0x3f) << 18);
897} 744}
898 745
899static __inline tile_bundle_bits 746static __inline tilegx_bundle_bits
900create_Dest_Imm8_X1(int num) 747create_BrOff_X1(int num)
901{ 748{
902 const unsigned int n = (unsigned int)num; 749 const unsigned int n = (unsigned int)num;
903 return (((tile_bundle_bits)(n & 0x0000003f)) << 31) | 750 return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
904 (((tile_bundle_bits)(n & 0x000000c0)) << 43); 751 (((tilegx_bundle_bits)(n & 0x0001ffc0)) << 37);
905} 752}
906 753
907static __inline tile_bundle_bits 754static __inline tilegx_bundle_bits
908create_Dest_SN(int num) 755create_BrType_X1(int num)
756{
757 const unsigned int n = (unsigned int)num;
758 return (((tilegx_bundle_bits)(n & 0x1f)) << 54);
759}
760
761static __inline tilegx_bundle_bits
762create_Dest_Imm8_X1(int num)
909{ 763{
910 const unsigned int n = (unsigned int)num; 764 const unsigned int n = (unsigned int)num;
911 return ((n & 0x3) << 2); 765 return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
766 (((tilegx_bundle_bits)(n & 0x000000c0)) << 43);
912} 767}
913 768
914static __inline tile_bundle_bits 769static __inline tilegx_bundle_bits
915create_Dest_X0(int num) 770create_Dest_X0(int num)
916{ 771{
917 const unsigned int n = (unsigned int)num; 772 const unsigned int n = (unsigned int)num;
918 return ((n & 0x3f) << 0); 773 return ((n & 0x3f) << 0);
919} 774}
920 775
921static __inline tile_bundle_bits 776static __inline tilegx_bundle_bits
922create_Dest_X1(int num) 777create_Dest_X1(int num)
923{ 778{
924 const unsigned int n = (unsigned int)num; 779 const unsigned int n = (unsigned int)num;
925 return (((tile_bundle_bits)(n & 0x3f)) << 31); 780 return (((tilegx_bundle_bits)(n & 0x3f)) << 31);
926} 781}
927 782
928static __inline tile_bundle_bits 783static __inline tilegx_bundle_bits
929create_Dest_Y0(int num) 784create_Dest_Y0(int num)
930{ 785{
931 const unsigned int n = (unsigned int)num; 786 const unsigned int n = (unsigned int)num;
932 return ((n & 0x3f) << 0); 787 return ((n & 0x3f) << 0);
933} 788}
934 789
935static __inline tile_bundle_bits 790static __inline tilegx_bundle_bits
936create_Dest_Y1(int num) 791create_Dest_Y1(int num)
937{ 792{
938 const unsigned int n = (unsigned int)num; 793 const unsigned int n = (unsigned int)num;
939 return (((tile_bundle_bits)(n & 0x3f)) << 31); 794 return (((tilegx_bundle_bits)(n & 0x3f)) << 31);
940} 795}
941 796
942static __inline tile_bundle_bits 797static __inline tilegx_bundle_bits
943create_Imm16_X0(int num) 798create_Imm16_X0(int num)
944{ 799{
945 const unsigned int n = (unsigned int)num; 800 const unsigned int n = (unsigned int)num;
946 return ((n & 0xffff) << 12); 801 return ((n & 0xffff) << 12);
947} 802}
948 803
949static __inline tile_bundle_bits 804static __inline tilegx_bundle_bits
950create_Imm16_X1(int num) 805create_Imm16_X1(int num)
951{ 806{
952 const unsigned int n = (unsigned int)num; 807 const unsigned int n = (unsigned int)num;
953 return (((tile_bundle_bits)(n & 0xffff)) << 43); 808 return (((tilegx_bundle_bits)(n & 0xffff)) << 43);
954} 809}
955 810
956static __inline tile_bundle_bits 811static __inline tilegx_bundle_bits
957create_Imm8_SN(int num) 812create_Imm8OpcodeExtension_X0(int num)
958{ 813{
959 const unsigned int n = (unsigned int)num; 814 const unsigned int n = (unsigned int)num;
960 return ((n & 0xff) << 0); 815 return ((n & 0xff) << 20);
961} 816}
962 817
963static __inline tile_bundle_bits 818static __inline tilegx_bundle_bits
819create_Imm8OpcodeExtension_X1(int num)
820{
821 const unsigned int n = (unsigned int)num;
822 return (((tilegx_bundle_bits)(n & 0xff)) << 51);
823}
824
825static __inline tilegx_bundle_bits
964create_Imm8_X0(int num) 826create_Imm8_X0(int num)
965{ 827{
966 const unsigned int n = (unsigned int)num; 828 const unsigned int n = (unsigned int)num;
967 return ((n & 0xff) << 12); 829 return ((n & 0xff) << 12);
968} 830}
969 831
970static __inline tile_bundle_bits 832static __inline tilegx_bundle_bits
971create_Imm8_X1(int num) 833create_Imm8_X1(int num)
972{ 834{
973 const unsigned int n = (unsigned int)num; 835 const unsigned int n = (unsigned int)num;
974 return (((tile_bundle_bits)(n & 0xff)) << 43); 836 return (((tilegx_bundle_bits)(n & 0xff)) << 43);
975} 837}
976 838
977static __inline tile_bundle_bits 839static __inline tilegx_bundle_bits
978create_Imm8_Y0(int num) 840create_Imm8_Y0(int num)
979{ 841{
980 const unsigned int n = (unsigned int)num; 842 const unsigned int n = (unsigned int)num;
981 return ((n & 0xff) << 12); 843 return ((n & 0xff) << 12);
982} 844}
983 845
984static __inline tile_bundle_bits 846static __inline tilegx_bundle_bits
985create_Imm8_Y1(int num) 847create_Imm8_Y1(int num)
986{ 848{
987 const unsigned int n = (unsigned int)num; 849 const unsigned int n = (unsigned int)num;
988 return (((tile_bundle_bits)(n & 0xff)) << 43); 850 return (((tilegx_bundle_bits)(n & 0xff)) << 43);
989}
990
991static __inline tile_bundle_bits
992create_ImmOpcodeExtension_X0(int num)
993{
994 const unsigned int n = (unsigned int)num;
995 return ((n & 0x7f) << 20);
996}
997
998static __inline tile_bundle_bits
999create_ImmOpcodeExtension_X1(int num)
1000{
1001 const unsigned int n = (unsigned int)num;
1002 return (((tile_bundle_bits)(n & 0x7f)) << 51);
1003}
1004
1005static __inline tile_bundle_bits
1006create_ImmRROpcodeExtension_SN(int num)
1007{
1008 const unsigned int n = (unsigned int)num;
1009 return ((n & 0x3) << 8);
1010}
1011
1012static __inline tile_bundle_bits
1013create_JOffLong_X1(int num)
1014{
1015 const unsigned int n = (unsigned int)num;
1016 return (((tile_bundle_bits)(n & 0x00007fff)) << 43) |
1017 (((tile_bundle_bits)(n & 0x00018000)) << 20) |
1018 (((tile_bundle_bits)(n & 0x001e0000)) << 14) |
1019 (((tile_bundle_bits)(n & 0x07e00000)) << 16) |
1020 (((tile_bundle_bits)(n & 0x18000000)) << 31);
1021}
1022
1023static __inline tile_bundle_bits
1024create_JOff_X1(int num)
1025{
1026 const unsigned int n = (unsigned int)num;
1027 return (((tile_bundle_bits)(n & 0x00007fff)) << 43) |
1028 (((tile_bundle_bits)(n & 0x00018000)) << 20) |
1029 (((tile_bundle_bits)(n & 0x001e0000)) << 14) |
1030 (((tile_bundle_bits)(n & 0x07e00000)) << 16) |
1031 (((tile_bundle_bits)(n & 0x08000000)) << 31);
1032}
1033
1034static __inline tile_bundle_bits
1035create_MF_Imm15_X1(int num)
1036{
1037 const unsigned int n = (unsigned int)num;
1038 return (((tile_bundle_bits)(n & 0x00003fff)) << 37) |
1039 (((tile_bundle_bits)(n & 0x00004000)) << 44);
1040} 851}
1041 852
1042static __inline tile_bundle_bits 853static __inline tilegx_bundle_bits
1043create_MMEnd_X0(int num) 854create_JumpOff_X1(int num)
1044{ 855{
1045 const unsigned int n = (unsigned int)num; 856 const unsigned int n = (unsigned int)num;
1046 return ((n & 0x1f) << 18); 857 return (((tilegx_bundle_bits)(n & 0x7ffffff)) << 31);
1047} 858}
1048 859
1049static __inline tile_bundle_bits 860static __inline tilegx_bundle_bits
1050create_MMEnd_X1(int num) 861create_JumpOpcodeExtension_X1(int num)
1051{ 862{
1052 const unsigned int n = (unsigned int)num; 863 const unsigned int n = (unsigned int)num;
1053 return (((tile_bundle_bits)(n & 0x1f)) << 49); 864 return (((tilegx_bundle_bits)(n & 0x1)) << 58);
1054} 865}
1055 866
1056static __inline tile_bundle_bits 867static __inline tilegx_bundle_bits
1057create_MMStart_X0(int num) 868create_MF_Imm14_X1(int num)
1058{ 869{
1059 const unsigned int n = (unsigned int)num; 870 const unsigned int n = (unsigned int)num;
1060 return ((n & 0x1f) << 23); 871 return (((tilegx_bundle_bits)(n & 0x3fff)) << 37);
1061} 872}
1062 873
1063static __inline tile_bundle_bits 874static __inline tilegx_bundle_bits
1064create_MMStart_X1(int num) 875create_MT_Imm14_X1(int num)
1065{ 876{
1066 const unsigned int n = (unsigned int)num; 877 const unsigned int n = (unsigned int)num;
1067 return (((tile_bundle_bits)(n & 0x1f)) << 54); 878 return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
879 (((tilegx_bundle_bits)(n & 0x00003fc0)) << 37);
1068} 880}
1069 881
1070static __inline tile_bundle_bits 882static __inline tilegx_bundle_bits
1071create_MT_Imm15_X1(int num)
1072{
1073 const unsigned int n = (unsigned int)num;
1074 return (((tile_bundle_bits)(n & 0x0000003f)) << 31) |
1075 (((tile_bundle_bits)(n & 0x00003fc0)) << 37) |
1076 (((tile_bundle_bits)(n & 0x00004000)) << 44);
1077}
1078
1079static __inline tile_bundle_bits
1080create_Mode(int num) 883create_Mode(int num)
1081{ 884{
1082 const unsigned int n = (unsigned int)num; 885 const unsigned int n = (unsigned int)num;
1083 return (((tile_bundle_bits)(n & 0x1)) << 63); 886 return (((tilegx_bundle_bits)(n & 0x3)) << 62);
1084} 887}
1085 888
1086static __inline tile_bundle_bits 889static __inline tilegx_bundle_bits
1087create_NoRegOpcodeExtension_SN(int num)
1088{
1089 const unsigned int n = (unsigned int)num;
1090 return ((n & 0xf) << 0);
1091}
1092
1093static __inline tile_bundle_bits
1094create_Opcode_SN(int num)
1095{
1096 const unsigned int n = (unsigned int)num;
1097 return ((n & 0x3f) << 10);
1098}
1099
1100static __inline tile_bundle_bits
1101create_Opcode_X0(int num) 890create_Opcode_X0(int num)
1102{ 891{
1103 const unsigned int n = (unsigned int)num; 892 const unsigned int n = (unsigned int)num;
1104 return ((n & 0x7) << 28); 893 return ((n & 0x7) << 28);
1105} 894}
1106 895
1107static __inline tile_bundle_bits 896static __inline tilegx_bundle_bits
1108create_Opcode_X1(int num) 897create_Opcode_X1(int num)
1109{ 898{
1110 const unsigned int n = (unsigned int)num; 899 const unsigned int n = (unsigned int)num;
1111 return (((tile_bundle_bits)(n & 0xf)) << 59); 900 return (((tilegx_bundle_bits)(n & 0x7)) << 59);
1112} 901}
1113 902
1114static __inline tile_bundle_bits 903static __inline tilegx_bundle_bits
1115create_Opcode_Y0(int num) 904create_Opcode_Y0(int num)
1116{ 905{
1117 const unsigned int n = (unsigned int)num; 906 const unsigned int n = (unsigned int)num;
1118 return ((n & 0xf) << 27); 907 return ((n & 0xf) << 27);
1119} 908}
1120 909
1121static __inline tile_bundle_bits 910static __inline tilegx_bundle_bits
1122create_Opcode_Y1(int num) 911create_Opcode_Y1(int num)
1123{ 912{
1124 const unsigned int n = (unsigned int)num; 913 const unsigned int n = (unsigned int)num;
1125 return (((tile_bundle_bits)(n & 0xf)) << 59); 914 return (((tilegx_bundle_bits)(n & 0xf)) << 58);
1126} 915}
1127 916
1128static __inline tile_bundle_bits 917static __inline tilegx_bundle_bits
1129create_Opcode_Y2(int num) 918create_Opcode_Y2(int num)
1130{ 919{
1131 const unsigned int n = (unsigned int)num; 920 const unsigned int n = (unsigned int)num;
1132 return (((tile_bundle_bits)(n & 0x7)) << 56); 921 return ((n & 0x00000001) << 26) |
1133} 922 (((tilegx_bundle_bits)(n & 0x00000002)) << 56);
1134
1135static __inline tile_bundle_bits
1136create_RROpcodeExtension_SN(int num)
1137{
1138 const unsigned int n = (unsigned int)num;
1139 return ((n & 0xf) << 4);
1140} 923}
1141 924
1142static __inline tile_bundle_bits 925static __inline tilegx_bundle_bits
1143create_RRROpcodeExtension_X0(int num) 926create_RRROpcodeExtension_X0(int num)
1144{ 927{
1145 const unsigned int n = (unsigned int)num; 928 const unsigned int n = (unsigned int)num;
1146 return ((n & 0x1ff) << 18); 929 return ((n & 0x3ff) << 18);
1147} 930}
1148 931
1149static __inline tile_bundle_bits 932static __inline tilegx_bundle_bits
1150create_RRROpcodeExtension_X1(int num) 933create_RRROpcodeExtension_X1(int num)
1151{ 934{
1152 const unsigned int n = (unsigned int)num; 935 const unsigned int n = (unsigned int)num;
1153 return (((tile_bundle_bits)(n & 0x1ff)) << 49); 936 return (((tilegx_bundle_bits)(n & 0x3ff)) << 49);
1154} 937}
1155 938
1156static __inline tile_bundle_bits 939static __inline tilegx_bundle_bits
1157create_RRROpcodeExtension_Y0(int num) 940create_RRROpcodeExtension_Y0(int num)
1158{ 941{
1159 const unsigned int n = (unsigned int)num; 942 const unsigned int n = (unsigned int)num;
1160 return ((n & 0x3) << 18); 943 return ((n & 0x3) << 18);
1161} 944}
1162 945
1163static __inline tile_bundle_bits 946static __inline tilegx_bundle_bits
1164create_RRROpcodeExtension_Y1(int num) 947create_RRROpcodeExtension_Y1(int num)
1165{ 948{
1166 const unsigned int n = (unsigned int)num; 949 const unsigned int n = (unsigned int)num;
1167 return (((tile_bundle_bits)(n & 0x3)) << 49); 950 return (((tilegx_bundle_bits)(n & 0x3)) << 49);
1168} 951}
1169 952
1170static __inline tile_bundle_bits 953static __inline tilegx_bundle_bits
1171create_RouteOpcodeExtension_SN(int num) 954create_ShAmt_X0(int num)
1172{ 955{
1173 const unsigned int n = (unsigned int)num; 956 const unsigned int n = (unsigned int)num;
1174 return ((n & 0x3ff) << 0); 957 return ((n & 0x3f) << 12);
1175} 958}
1176 959
1177static __inline tile_bundle_bits 960static __inline tilegx_bundle_bits
1178create_S_X0(int num) 961create_ShAmt_X1(int num)
1179{ 962{
1180 const unsigned int n = (unsigned int)num; 963 const unsigned int n = (unsigned int)num;
1181 return ((n & 0x1) << 27); 964 return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
1182} 965}
1183 966
1184static __inline tile_bundle_bits 967static __inline tilegx_bundle_bits
1185create_S_X1(int num) 968create_ShAmt_Y0(int num)
1186{ 969{
1187 const unsigned int n = (unsigned int)num; 970 const unsigned int n = (unsigned int)num;
1188 return (((tile_bundle_bits)(n & 0x1)) << 58); 971 return ((n & 0x3f) << 12);
1189} 972}
1190 973
1191static __inline tile_bundle_bits 974static __inline tilegx_bundle_bits
1192create_ShAmt_X0(int num) 975create_ShAmt_Y1(int num)
1193{ 976{
1194 const unsigned int n = (unsigned int)num; 977 const unsigned int n = (unsigned int)num;
1195 return ((n & 0x1f) << 12); 978 return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
1196} 979}
1197 980
1198static __inline tile_bundle_bits 981static __inline tilegx_bundle_bits
1199create_ShAmt_X1(int num) 982create_ShiftOpcodeExtension_X0(int num)
1200{ 983{
1201 const unsigned int n = (unsigned int)num; 984 const unsigned int n = (unsigned int)num;
1202 return (((tile_bundle_bits)(n & 0x1f)) << 43); 985 return ((n & 0x3ff) << 18);
1203} 986}
1204 987
1205static __inline tile_bundle_bits 988static __inline tilegx_bundle_bits
1206create_ShAmt_Y0(int num) 989create_ShiftOpcodeExtension_X1(int num)
1207{ 990{
1208 const unsigned int n = (unsigned int)num; 991 const unsigned int n = (unsigned int)num;
1209 return ((n & 0x1f) << 12); 992 return (((tilegx_bundle_bits)(n & 0x3ff)) << 49);
1210} 993}
1211 994
1212static __inline tile_bundle_bits 995static __inline tilegx_bundle_bits
1213create_ShAmt_Y1(int num) 996create_ShiftOpcodeExtension_Y0(int num)
1214{ 997{
1215 const unsigned int n = (unsigned int)num; 998 const unsigned int n = (unsigned int)num;
1216 return (((tile_bundle_bits)(n & 0x1f)) << 43); 999 return ((n & 0x3) << 18);
1217} 1000}
1218 1001
1219static __inline tile_bundle_bits 1002static __inline tilegx_bundle_bits
1003create_ShiftOpcodeExtension_Y1(int num)
1004{
1005 const unsigned int n = (unsigned int)num;
1006 return (((tilegx_bundle_bits)(n & 0x3)) << 49);
1007}
1008
1009static __inline tilegx_bundle_bits
1220create_SrcA_X0(int num) 1010create_SrcA_X0(int num)
1221{ 1011{
1222 const unsigned int n = (unsigned int)num; 1012 const unsigned int n = (unsigned int)num;
1223 return ((n & 0x3f) << 6); 1013 return ((n & 0x3f) << 6);
1224} 1014}
1225 1015
1226static __inline tile_bundle_bits 1016static __inline tilegx_bundle_bits
1227create_SrcA_X1(int num) 1017create_SrcA_X1(int num)
1228{ 1018{
1229 const unsigned int n = (unsigned int)num; 1019 const unsigned int n = (unsigned int)num;
1230 return (((tile_bundle_bits)(n & 0x3f)) << 37); 1020 return (((tilegx_bundle_bits)(n & 0x3f)) << 37);
1231} 1021}
1232 1022
1233static __inline tile_bundle_bits 1023static __inline tilegx_bundle_bits
1234create_SrcA_Y0(int num) 1024create_SrcA_Y0(int num)
1235{ 1025{
1236 const unsigned int n = (unsigned int)num; 1026 const unsigned int n = (unsigned int)num;
1237 return ((n & 0x3f) << 6); 1027 return ((n & 0x3f) << 6);
1238} 1028}
1239 1029
1240static __inline tile_bundle_bits 1030static __inline tilegx_bundle_bits
1241create_SrcA_Y1(int num) 1031create_SrcA_Y1(int num)
1242{ 1032{
1243 const unsigned int n = (unsigned int)num; 1033 const unsigned int n = (unsigned int)num;
1244 return (((tile_bundle_bits)(n & 0x3f)) << 37); 1034 return (((tilegx_bundle_bits)(n & 0x3f)) << 37);
1245} 1035}
1246 1036
1247static __inline tile_bundle_bits 1037static __inline tilegx_bundle_bits
1248create_SrcA_Y2(int num) 1038create_SrcA_Y2(int num)
1249{ 1039{
1250 const unsigned int n = (unsigned int)num; 1040 const unsigned int n = (unsigned int)num;
1251 return ((n & 0x00000001) << 26) | 1041 return ((n & 0x3f) << 20);
1252 (((tile_bundle_bits)(n & 0x0000003e)) << 50);
1253} 1042}
1254 1043
1255static __inline tile_bundle_bits 1044static __inline tilegx_bundle_bits
1256create_SrcBDest_Y2(int num) 1045create_SrcBDest_Y2(int num)
1257{ 1046{
1258 const unsigned int n = (unsigned int)num; 1047 const unsigned int n = (unsigned int)num;
1259 return ((n & 0x3f) << 20); 1048 return (((tilegx_bundle_bits)(n & 0x3f)) << 51);
1260} 1049}
1261 1050
1262static __inline tile_bundle_bits 1051static __inline tilegx_bundle_bits
1263create_SrcB_X0(int num) 1052create_SrcB_X0(int num)
1264{ 1053{
1265 const unsigned int n = (unsigned int)num; 1054 const unsigned int n = (unsigned int)num;
1266 return ((n & 0x3f) << 12); 1055 return ((n & 0x3f) << 12);
1267} 1056}
1268 1057
1269static __inline tile_bundle_bits 1058static __inline tilegx_bundle_bits
1270create_SrcB_X1(int num) 1059create_SrcB_X1(int num)
1271{ 1060{
1272 const unsigned int n = (unsigned int)num; 1061 const unsigned int n = (unsigned int)num;
1273 return (((tile_bundle_bits)(n & 0x3f)) << 43); 1062 return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
1274} 1063}
1275 1064
1276static __inline tile_bundle_bits 1065static __inline tilegx_bundle_bits
1277create_SrcB_Y0(int num) 1066create_SrcB_Y0(int num)
1278{ 1067{
1279 const unsigned int n = (unsigned int)num; 1068 const unsigned int n = (unsigned int)num;
1280 return ((n & 0x3f) << 12); 1069 return ((n & 0x3f) << 12);
1281} 1070}
1282 1071
1283static __inline tile_bundle_bits 1072static __inline tilegx_bundle_bits
1284create_SrcB_Y1(int num) 1073create_SrcB_Y1(int num)
1285{ 1074{
1286 const unsigned int n = (unsigned int)num; 1075 const unsigned int n = (unsigned int)num;
1287 return (((tile_bundle_bits)(n & 0x3f)) << 43); 1076 return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
1288} 1077}
1289 1078
1290static __inline tile_bundle_bits 1079static __inline tilegx_bundle_bits
1291create_Src_SN(int num) 1080create_UnaryOpcodeExtension_X0(int num)
1292{ 1081{
1293 const unsigned int n = (unsigned int)num; 1082 const unsigned int n = (unsigned int)num;
1294 return ((n & 0x3) << 0); 1083 return ((n & 0x3f) << 12);
1295}
1296
1297static __inline tile_bundle_bits
1298create_UnOpcodeExtension_X0(int num)
1299{
1300 const unsigned int n = (unsigned int)num;
1301 return ((n & 0x1f) << 12);
1302}
1303
1304static __inline tile_bundle_bits
1305create_UnOpcodeExtension_X1(int num)
1306{
1307 const unsigned int n = (unsigned int)num;
1308 return (((tile_bundle_bits)(n & 0x1f)) << 43);
1309}
1310
1311static __inline tile_bundle_bits
1312create_UnOpcodeExtension_Y0(int num)
1313{
1314 const unsigned int n = (unsigned int)num;
1315 return ((n & 0x1f) << 12);
1316}
1317
1318static __inline tile_bundle_bits
1319create_UnOpcodeExtension_Y1(int num)
1320{
1321 const unsigned int n = (unsigned int)num;
1322 return (((tile_bundle_bits)(n & 0x1f)) << 43);
1323}
1324
1325static __inline tile_bundle_bits
1326create_UnShOpcodeExtension_X0(int num)
1327{
1328 const unsigned int n = (unsigned int)num;
1329 return ((n & 0x3ff) << 17);
1330} 1084}
1331 1085
1332static __inline tile_bundle_bits 1086static __inline tilegx_bundle_bits
1333create_UnShOpcodeExtension_X1(int num) 1087create_UnaryOpcodeExtension_X1(int num)
1334{ 1088{
1335 const unsigned int n = (unsigned int)num; 1089 const unsigned int n = (unsigned int)num;
1336 return (((tile_bundle_bits)(n & 0x3ff)) << 48); 1090 return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
1337} 1091}
1338 1092
1339static __inline tile_bundle_bits 1093static __inline tilegx_bundle_bits
1340create_UnShOpcodeExtension_Y0(int num) 1094create_UnaryOpcodeExtension_Y0(int num)
1341{ 1095{
1342 const unsigned int n = (unsigned int)num; 1096 const unsigned int n = (unsigned int)num;
1343 return ((n & 0x7) << 17); 1097 return ((n & 0x3f) << 12);
1344} 1098}
1345 1099
1346static __inline tile_bundle_bits 1100static __inline tilegx_bundle_bits
1347create_UnShOpcodeExtension_Y1(int num) 1101create_UnaryOpcodeExtension_Y1(int num)
1348{ 1102{
1349 const unsigned int n = (unsigned int)num; 1103 const unsigned int n = (unsigned int)num;
1350 return (((tile_bundle_bits)(n & 0x7)) << 48); 1104 return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
1351} 1105}
1352 1106
1353 1107
1354
1355typedef enum 1108typedef enum
1356{ 1109{
1357 TILE_PIPELINE_X0, 1110 TILEGX_PIPELINE_X0,
1358 TILE_PIPELINE_X1, 1111 TILEGX_PIPELINE_X1,
1359 TILE_PIPELINE_Y0, 1112 TILEGX_PIPELINE_Y0,
1360 TILE_PIPELINE_Y1, 1113 TILEGX_PIPELINE_Y1,
1361 TILE_PIPELINE_Y2, 1114 TILEGX_PIPELINE_Y2,
1362} tile_pipeline; 1115} tilegx_pipeline;
1363 1116
1364#define tile_is_x_pipeline(p) ((int)(p) <= (int)TILE_PIPELINE_X1) 1117#define tilegx_is_x_pipeline(p) ((int)(p) <= (int)TILEGX_PIPELINE_X1)
1365 1118
1366typedef enum 1119typedef enum
1367{ 1120{
1368 TILE_OP_TYPE_REGISTER, 1121 TILEGX_OP_TYPE_REGISTER,
1369 TILE_OP_TYPE_IMMEDIATE, 1122 TILEGX_OP_TYPE_IMMEDIATE,
1370 TILE_OP_TYPE_ADDRESS, 1123 TILEGX_OP_TYPE_ADDRESS,
1371 TILE_OP_TYPE_SPR 1124 TILEGX_OP_TYPE_SPR
1372} tile_operand_type; 1125} tilegx_operand_type;
1373 1126
1374/* This is the bit that determines if a bundle is in the Y encoding. */ 1127/* These are the bits that determine if a bundle is in the X encoding. */
1375#define TILE_BUNDLE_Y_ENCODING_MASK ((tile_bundle_bits)1 << 63) 1128#define TILEGX_BUNDLE_MODE_MASK ((tilegx_bundle_bits)3 << 62)
1376 1129
1377enum 1130enum
1378{ 1131{
1379 /* Maximum number of instructions in a bundle (2 for X, 3 for Y). */ 1132 /* Maximum number of instructions in a bundle (2 for X, 3 for Y). */
1380 TILE_MAX_INSTRUCTIONS_PER_BUNDLE = 3, 1133 TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE = 3,
1381 1134
1382 /* How many different pipeline encodings are there? X0, X1, Y0, Y1, Y2. */ 1135 /* How many different pipeline encodings are there? X0, X1, Y0, Y1, Y2. */
1383 TILE_NUM_PIPELINE_ENCODINGS = 5, 1136 TILEGX_NUM_PIPELINE_ENCODINGS = 5,
1384 1137
1385 /* Log base 2 of TILE_BUNDLE_SIZE_IN_BYTES. */ 1138 /* Log base 2 of TILEGX_BUNDLE_SIZE_IN_BYTES. */
1386 TILE_LOG2_BUNDLE_SIZE_IN_BYTES = 3, 1139 TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES = 3,
1387 1140
1388 /* Instructions take this many bytes. */ 1141 /* Instructions take this many bytes. */
1389 TILE_BUNDLE_SIZE_IN_BYTES = 1 << TILE_LOG2_BUNDLE_SIZE_IN_BYTES, 1142 TILEGX_BUNDLE_SIZE_IN_BYTES = 1 << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES,
1390 1143
1391 /* Log base 2 of TILE_BUNDLE_ALIGNMENT_IN_BYTES. */ 1144 /* Log base 2 of TILEGX_BUNDLE_ALIGNMENT_IN_BYTES. */
1392 TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3, 1145 TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3,
1393 1146
1394 /* Bundles should be aligned modulo this number of bytes. */ 1147 /* Bundles should be aligned modulo this number of bytes. */
1395 TILE_BUNDLE_ALIGNMENT_IN_BYTES = 1148 TILEGX_BUNDLE_ALIGNMENT_IN_BYTES =
1396 (1 << TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES), 1149 (1 << TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES),
1397
1398 /* Log base 2 of TILE_SN_INSTRUCTION_SIZE_IN_BYTES. */
1399 TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES = 1,
1400
1401 /* Static network instructions take this many bytes. */
1402 TILE_SN_INSTRUCTION_SIZE_IN_BYTES =
1403 (1 << TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES),
1404 1150
1405 /* Number of registers (some are magic, such as network I/O). */ 1151 /* Number of registers (some are magic, such as network I/O). */
1406 TILE_NUM_REGISTERS = 64, 1152 TILEGX_NUM_REGISTERS = 64,
1407
1408 /* Number of static network registers. */
1409 TILE_NUM_SN_REGISTERS = 4
1410}; 1153};
1411 1154
1412 1155
1413struct tile_operand 1156struct tilegx_operand
1414{ 1157{
1415 /* Is this operand a register, immediate or address? */ 1158 /* Is this operand a register, immediate or address? */
1416 tile_operand_type type; 1159 tilegx_operand_type type;
1417 1160
1418 /* The default relocation type for this operand. */ 1161 /* The default relocation type for this operand. */
1419 signed int default_reloc : 16; 1162 signed int default_reloc : 16;
@@ -1437,27 +1180,27 @@ struct tile_operand
1437 unsigned int rightshift : 2; 1180 unsigned int rightshift : 2;
1438 1181
1439 /* Return the bits for this operand to be ORed into an existing bundle. */ 1182 /* Return the bits for this operand to be ORed into an existing bundle. */
1440 tile_bundle_bits (*insert) (int op); 1183 tilegx_bundle_bits (*insert) (int op);
1441 1184
1442 /* Extract this operand and return it. */ 1185 /* Extract this operand and return it. */
1443 unsigned int (*extract) (tile_bundle_bits bundle); 1186 unsigned int (*extract) (tilegx_bundle_bits bundle);
1444}; 1187};
1445 1188
1446 1189
1447extern const struct tile_operand tile_operands[]; 1190extern const struct tilegx_operand tilegx_operands[];
1448 1191
1449/* One finite-state machine per pipe for rapid instruction decoding. */ 1192/* One finite-state machine per pipe for rapid instruction decoding. */
1450extern const unsigned short * const 1193extern const unsigned short * const
1451tile_bundle_decoder_fsms[TILE_NUM_PIPELINE_ENCODINGS]; 1194tilegx_bundle_decoder_fsms[TILEGX_NUM_PIPELINE_ENCODINGS];
1452 1195
1453 1196
1454struct tile_opcode 1197struct tilegx_opcode
1455{ 1198{
1456 /* The opcode mnemonic, e.g. "add" */ 1199 /* The opcode mnemonic, e.g. "add" */
1457 const char *name; 1200 const char *name;
1458 1201
1459 /* The enum value for this mnemonic. */ 1202 /* The enum value for this mnemonic. */
1460 tile_mnemonic mnemonic; 1203 tilegx_mnemonic mnemonic;
1461 1204
1462 /* A bit mask of which of the five pipes this instruction 1205 /* A bit mask of which of the five pipes this instruction
1463 is compatible with: 1206 is compatible with:
@@ -1478,29 +1221,28 @@ struct tile_opcode
1478 unsigned char can_bundle; 1221 unsigned char can_bundle;
1479 1222
1480 /* The description of the operands. Each of these is an 1223 /* The description of the operands. Each of these is an
1481 * index into the tile_operands[] table. */ 1224 * index into the tilegx_operands[] table. */
1482 unsigned char operands[TILE_NUM_PIPELINE_ENCODINGS][TILE_MAX_OPERANDS]; 1225 unsigned char operands[TILEGX_NUM_PIPELINE_ENCODINGS][TILEGX_MAX_OPERANDS];
1483 1226
1484}; 1227};
1485 1228
1486extern const struct tile_opcode tile_opcodes[]; 1229extern const struct tilegx_opcode tilegx_opcodes[];
1487
1488 1230
1489/* Used for non-textual disassembly into structs. */ 1231/* Used for non-textual disassembly into structs. */
1490struct tile_decoded_instruction 1232struct tilegx_decoded_instruction
1491{ 1233{
1492 const struct tile_opcode *opcode; 1234 const struct tilegx_opcode *opcode;
1493 const struct tile_operand *operands[TILE_MAX_OPERANDS]; 1235 const struct tilegx_operand *operands[TILEGX_MAX_OPERANDS];
1494 int operand_values[TILE_MAX_OPERANDS]; 1236 long long operand_values[TILEGX_MAX_OPERANDS];
1495}; 1237};
1496 1238
1497 1239
1498/* Disassemble a bundle into a struct for machine processing. */ 1240/* Disassemble a bundle into a struct for machine processing. */
1499extern int parse_insn_tile(tile_bundle_bits bits, 1241extern int parse_insn_tilegx(tilegx_bundle_bits bits,
1500 unsigned int pc, 1242 unsigned long long pc,
1501 struct tile_decoded_instruction 1243 struct tilegx_decoded_instruction
1502 decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]); 1244 decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE]);
1503 1245
1504 1246
1505 1247
1506#endif /* opcode_tile_h */ 1248#endif /* opcode_tilegx_h */
diff --git a/arch/tile/include/asm/opcode_constants_64.h b/arch/tile/include/asm/opcode_constants_64.h
index 227d033b180c..710192869476 100644
--- a/arch/tile/include/asm/opcode_constants_64.h
+++ b/arch/tile/include/asm/opcode_constants_64.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved. 2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
@@ -19,462 +19,591 @@
19#define _TILE_OPCODE_CONSTANTS_H 19#define _TILE_OPCODE_CONSTANTS_H
20enum 20enum
21{ 21{
22 ADDBS_U_SPECIAL_0_OPCODE_X0 = 98, 22 ADDI_IMM8_OPCODE_X0 = 1,
23 ADDBS_U_SPECIAL_0_OPCODE_X1 = 68, 23 ADDI_IMM8_OPCODE_X1 = 1,
24 ADDB_SPECIAL_0_OPCODE_X0 = 1, 24 ADDI_OPCODE_Y0 = 0,
25 ADDB_SPECIAL_0_OPCODE_X1 = 1, 25 ADDI_OPCODE_Y1 = 1,
26 ADDHS_SPECIAL_0_OPCODE_X0 = 99, 26 ADDLI_OPCODE_X0 = 1,
27 ADDHS_SPECIAL_0_OPCODE_X1 = 69, 27 ADDLI_OPCODE_X1 = 0,
28 ADDH_SPECIAL_0_OPCODE_X0 = 2, 28 ADDXI_IMM8_OPCODE_X0 = 2,
29 ADDH_SPECIAL_0_OPCODE_X1 = 2, 29 ADDXI_IMM8_OPCODE_X1 = 2,
30 ADDIB_IMM_0_OPCODE_X0 = 1, 30 ADDXI_OPCODE_Y0 = 1,
31 ADDIB_IMM_0_OPCODE_X1 = 1, 31 ADDXI_OPCODE_Y1 = 2,
32 ADDIH_IMM_0_OPCODE_X0 = 2, 32 ADDXLI_OPCODE_X0 = 2,
33 ADDIH_IMM_0_OPCODE_X1 = 2, 33 ADDXLI_OPCODE_X1 = 1,
34 ADDI_IMM_0_OPCODE_X0 = 3, 34 ADDXSC_RRR_0_OPCODE_X0 = 1,
35 ADDI_IMM_0_OPCODE_X1 = 3, 35 ADDXSC_RRR_0_OPCODE_X1 = 1,
36 ADDI_IMM_1_OPCODE_SN = 1, 36 ADDX_RRR_0_OPCODE_X0 = 2,
37 ADDI_OPCODE_Y0 = 9, 37 ADDX_RRR_0_OPCODE_X1 = 2,
38 ADDI_OPCODE_Y1 = 7, 38 ADDX_RRR_0_OPCODE_Y0 = 0,
39 ADDLIS_OPCODE_X0 = 1, 39 ADDX_SPECIAL_0_OPCODE_Y1 = 0,
40 ADDLIS_OPCODE_X1 = 2, 40 ADD_RRR_0_OPCODE_X0 = 3,
41 ADDLI_OPCODE_X0 = 2, 41 ADD_RRR_0_OPCODE_X1 = 3,
42 ADDLI_OPCODE_X1 = 3, 42 ADD_RRR_0_OPCODE_Y0 = 1,
43 ADDS_SPECIAL_0_OPCODE_X0 = 96, 43 ADD_SPECIAL_0_OPCODE_Y1 = 1,
44 ADDS_SPECIAL_0_OPCODE_X1 = 66, 44 ANDI_IMM8_OPCODE_X0 = 3,
45 ADD_SPECIAL_0_OPCODE_X0 = 3, 45 ANDI_IMM8_OPCODE_X1 = 3,
46 ADD_SPECIAL_0_OPCODE_X1 = 3, 46 ANDI_OPCODE_Y0 = 2,
47 ADD_SPECIAL_0_OPCODE_Y0 = 0, 47 ANDI_OPCODE_Y1 = 3,
48 ADD_SPECIAL_0_OPCODE_Y1 = 0, 48 AND_RRR_0_OPCODE_X0 = 4,
49 ADIFFB_U_SPECIAL_0_OPCODE_X0 = 4, 49 AND_RRR_0_OPCODE_X1 = 4,
50 ADIFFH_SPECIAL_0_OPCODE_X0 = 5, 50 AND_RRR_5_OPCODE_Y0 = 0,
51 ANDI_IMM_0_OPCODE_X0 = 1, 51 AND_RRR_5_OPCODE_Y1 = 0,
52 ANDI_IMM_0_OPCODE_X1 = 4, 52 BEQZT_BRANCH_OPCODE_X1 = 16,
53 ANDI_OPCODE_Y0 = 10, 53 BEQZ_BRANCH_OPCODE_X1 = 17,
54 ANDI_OPCODE_Y1 = 8, 54 BFEXTS_BF_OPCODE_X0 = 4,
55 AND_SPECIAL_0_OPCODE_X0 = 6, 55 BFEXTU_BF_OPCODE_X0 = 5,
56 AND_SPECIAL_0_OPCODE_X1 = 4, 56 BFINS_BF_OPCODE_X0 = 6,
57 AND_SPECIAL_2_OPCODE_Y0 = 0, 57 BF_OPCODE_X0 = 3,
58 AND_SPECIAL_2_OPCODE_Y1 = 0, 58 BGEZT_BRANCH_OPCODE_X1 = 18,
59 AULI_OPCODE_X0 = 3, 59 BGEZ_BRANCH_OPCODE_X1 = 19,
60 AULI_OPCODE_X1 = 4, 60 BGTZT_BRANCH_OPCODE_X1 = 20,
61 AVGB_U_SPECIAL_0_OPCODE_X0 = 7, 61 BGTZ_BRANCH_OPCODE_X1 = 21,
62 AVGH_SPECIAL_0_OPCODE_X0 = 8, 62 BLBCT_BRANCH_OPCODE_X1 = 22,
63 BBNST_BRANCH_OPCODE_X1 = 15, 63 BLBC_BRANCH_OPCODE_X1 = 23,
64 BBNS_BRANCH_OPCODE_X1 = 14, 64 BLBST_BRANCH_OPCODE_X1 = 24,
65 BBNS_OPCODE_SN = 63, 65 BLBS_BRANCH_OPCODE_X1 = 25,
66 BBST_BRANCH_OPCODE_X1 = 13, 66 BLEZT_BRANCH_OPCODE_X1 = 26,
67 BBS_BRANCH_OPCODE_X1 = 12, 67 BLEZ_BRANCH_OPCODE_X1 = 27,
68 BBS_OPCODE_SN = 62, 68 BLTZT_BRANCH_OPCODE_X1 = 28,
69 BGEZT_BRANCH_OPCODE_X1 = 7, 69 BLTZ_BRANCH_OPCODE_X1 = 29,
70 BGEZ_BRANCH_OPCODE_X1 = 6, 70 BNEZT_BRANCH_OPCODE_X1 = 30,
71 BGEZ_OPCODE_SN = 61, 71 BNEZ_BRANCH_OPCODE_X1 = 31,
72 BGZT_BRANCH_OPCODE_X1 = 5, 72 BRANCH_OPCODE_X1 = 2,
73 BGZ_BRANCH_OPCODE_X1 = 4, 73 CMOVEQZ_RRR_0_OPCODE_X0 = 5,
74 BGZ_OPCODE_SN = 58, 74 CMOVEQZ_RRR_4_OPCODE_Y0 = 0,
75 BITX_UN_0_SHUN_0_OPCODE_X0 = 1, 75 CMOVNEZ_RRR_0_OPCODE_X0 = 6,
76 BITX_UN_0_SHUN_0_OPCODE_Y0 = 1, 76 CMOVNEZ_RRR_4_OPCODE_Y0 = 1,
77 BLEZT_BRANCH_OPCODE_X1 = 11, 77 CMPEQI_IMM8_OPCODE_X0 = 4,
78 BLEZ_BRANCH_OPCODE_X1 = 10, 78 CMPEQI_IMM8_OPCODE_X1 = 4,
79 BLEZ_OPCODE_SN = 59, 79 CMPEQI_OPCODE_Y0 = 3,
80 BLZT_BRANCH_OPCODE_X1 = 9, 80 CMPEQI_OPCODE_Y1 = 4,
81 BLZ_BRANCH_OPCODE_X1 = 8, 81 CMPEQ_RRR_0_OPCODE_X0 = 7,
82 BLZ_OPCODE_SN = 60, 82 CMPEQ_RRR_0_OPCODE_X1 = 5,
83 BNZT_BRANCH_OPCODE_X1 = 3, 83 CMPEQ_RRR_3_OPCODE_Y0 = 0,
84 BNZ_BRANCH_OPCODE_X1 = 2, 84 CMPEQ_RRR_3_OPCODE_Y1 = 2,
85 BNZ_OPCODE_SN = 57, 85 CMPEXCH4_RRR_0_OPCODE_X1 = 6,
86 BPT_NOREG_RR_IMM_0_OPCODE_SN = 1, 86 CMPEXCH_RRR_0_OPCODE_X1 = 7,
87 BRANCH_OPCODE_X1 = 5, 87 CMPLES_RRR_0_OPCODE_X0 = 8,
88 BYTEX_UN_0_SHUN_0_OPCODE_X0 = 2, 88 CMPLES_RRR_0_OPCODE_X1 = 8,
89 BYTEX_UN_0_SHUN_0_OPCODE_Y0 = 2, 89 CMPLES_RRR_2_OPCODE_Y0 = 0,
90 BZT_BRANCH_OPCODE_X1 = 1, 90 CMPLES_RRR_2_OPCODE_Y1 = 0,
91 BZ_BRANCH_OPCODE_X1 = 0, 91 CMPLEU_RRR_0_OPCODE_X0 = 9,
92 BZ_OPCODE_SN = 56, 92 CMPLEU_RRR_0_OPCODE_X1 = 9,
93 CLZ_UN_0_SHUN_0_OPCODE_X0 = 3, 93 CMPLEU_RRR_2_OPCODE_Y0 = 1,
94 CLZ_UN_0_SHUN_0_OPCODE_Y0 = 3, 94 CMPLEU_RRR_2_OPCODE_Y1 = 1,
95 CRC32_32_SPECIAL_0_OPCODE_X0 = 9, 95 CMPLTSI_IMM8_OPCODE_X0 = 5,
96 CRC32_8_SPECIAL_0_OPCODE_X0 = 10, 96 CMPLTSI_IMM8_OPCODE_X1 = 5,
97 CTZ_UN_0_SHUN_0_OPCODE_X0 = 4, 97 CMPLTSI_OPCODE_Y0 = 4,
98 CTZ_UN_0_SHUN_0_OPCODE_Y0 = 4, 98 CMPLTSI_OPCODE_Y1 = 5,
99 DRAIN_UN_0_SHUN_0_OPCODE_X1 = 1, 99 CMPLTS_RRR_0_OPCODE_X0 = 10,
100 DTLBPR_UN_0_SHUN_0_OPCODE_X1 = 2, 100 CMPLTS_RRR_0_OPCODE_X1 = 10,
101 DWORD_ALIGN_SPECIAL_0_OPCODE_X0 = 95, 101 CMPLTS_RRR_2_OPCODE_Y0 = 2,
102 FINV_UN_0_SHUN_0_OPCODE_X1 = 3, 102 CMPLTS_RRR_2_OPCODE_Y1 = 2,
103 FLUSH_UN_0_SHUN_0_OPCODE_X1 = 4, 103 CMPLTUI_IMM8_OPCODE_X0 = 6,
104 FNOP_NOREG_RR_IMM_0_OPCODE_SN = 3, 104 CMPLTUI_IMM8_OPCODE_X1 = 6,
105 FNOP_UN_0_SHUN_0_OPCODE_X0 = 5, 105 CMPLTU_RRR_0_OPCODE_X0 = 11,
106 FNOP_UN_0_SHUN_0_OPCODE_X1 = 5, 106 CMPLTU_RRR_0_OPCODE_X1 = 11,
107 FNOP_UN_0_SHUN_0_OPCODE_Y0 = 5, 107 CMPLTU_RRR_2_OPCODE_Y0 = 3,
108 FNOP_UN_0_SHUN_0_OPCODE_Y1 = 1, 108 CMPLTU_RRR_2_OPCODE_Y1 = 3,
109 HALT_NOREG_RR_IMM_0_OPCODE_SN = 0, 109 CMPNE_RRR_0_OPCODE_X0 = 12,
110 ICOH_UN_0_SHUN_0_OPCODE_X1 = 6, 110 CMPNE_RRR_0_OPCODE_X1 = 12,
111 ILL_UN_0_SHUN_0_OPCODE_X1 = 7, 111 CMPNE_RRR_3_OPCODE_Y0 = 1,
112 ILL_UN_0_SHUN_0_OPCODE_Y1 = 2, 112 CMPNE_RRR_3_OPCODE_Y1 = 3,
113 IMM_0_OPCODE_SN = 0, 113 CMULAF_RRR_0_OPCODE_X0 = 13,
114 IMM_0_OPCODE_X0 = 4, 114 CMULA_RRR_0_OPCODE_X0 = 14,
115 IMM_0_OPCODE_X1 = 6, 115 CMULFR_RRR_0_OPCODE_X0 = 15,
116 IMM_1_OPCODE_SN = 1, 116 CMULF_RRR_0_OPCODE_X0 = 16,
117 IMM_OPCODE_0_X0 = 5, 117 CMULHR_RRR_0_OPCODE_X0 = 17,
118 INTHB_SPECIAL_0_OPCODE_X0 = 11, 118 CMULH_RRR_0_OPCODE_X0 = 18,
119 INTHB_SPECIAL_0_OPCODE_X1 = 5, 119 CMUL_RRR_0_OPCODE_X0 = 19,
120 INTHH_SPECIAL_0_OPCODE_X0 = 12, 120 CNTLZ_UNARY_OPCODE_X0 = 1,
121 INTHH_SPECIAL_0_OPCODE_X1 = 6, 121 CNTLZ_UNARY_OPCODE_Y0 = 1,
122 INTLB_SPECIAL_0_OPCODE_X0 = 13, 122 CNTTZ_UNARY_OPCODE_X0 = 2,
123 INTLB_SPECIAL_0_OPCODE_X1 = 7, 123 CNTTZ_UNARY_OPCODE_Y0 = 2,
124 INTLH_SPECIAL_0_OPCODE_X0 = 14, 124 CRC32_32_RRR_0_OPCODE_X0 = 20,
125 INTLH_SPECIAL_0_OPCODE_X1 = 8, 125 CRC32_8_RRR_0_OPCODE_X0 = 21,
126 INV_UN_0_SHUN_0_OPCODE_X1 = 8, 126 DBLALIGN2_RRR_0_OPCODE_X0 = 22,
127 IRET_UN_0_SHUN_0_OPCODE_X1 = 9, 127 DBLALIGN2_RRR_0_OPCODE_X1 = 13,
128 JALB_OPCODE_X1 = 13, 128 DBLALIGN4_RRR_0_OPCODE_X0 = 23,
129 JALF_OPCODE_X1 = 12, 129 DBLALIGN4_RRR_0_OPCODE_X1 = 14,
130 JALRP_SPECIAL_0_OPCODE_X1 = 9, 130 DBLALIGN6_RRR_0_OPCODE_X0 = 24,
131 JALRR_IMM_1_OPCODE_SN = 3, 131 DBLALIGN6_RRR_0_OPCODE_X1 = 15,
132 JALR_RR_IMM_0_OPCODE_SN = 5, 132 DBLALIGN_RRR_0_OPCODE_X0 = 25,
133 JALR_SPECIAL_0_OPCODE_X1 = 10, 133 DRAIN_UNARY_OPCODE_X1 = 1,
134 JB_OPCODE_X1 = 11, 134 DTLBPR_UNARY_OPCODE_X1 = 2,
135 JF_OPCODE_X1 = 10, 135 EXCH4_RRR_0_OPCODE_X1 = 16,
136 JRP_SPECIAL_0_OPCODE_X1 = 11, 136 EXCH_RRR_0_OPCODE_X1 = 17,
137 JRR_IMM_1_OPCODE_SN = 2, 137 FDOUBLE_ADDSUB_RRR_0_OPCODE_X0 = 26,
138 JR_RR_IMM_0_OPCODE_SN = 4, 138 FDOUBLE_ADD_FLAGS_RRR_0_OPCODE_X0 = 27,
139 JR_SPECIAL_0_OPCODE_X1 = 12, 139 FDOUBLE_MUL_FLAGS_RRR_0_OPCODE_X0 = 28,
140 LBADD_IMM_0_OPCODE_X1 = 22, 140 FDOUBLE_PACK1_RRR_0_OPCODE_X0 = 29,
141 LBADD_U_IMM_0_OPCODE_X1 = 23, 141 FDOUBLE_PACK2_RRR_0_OPCODE_X0 = 30,
142 LB_OPCODE_Y2 = 0, 142 FDOUBLE_SUB_FLAGS_RRR_0_OPCODE_X0 = 31,
143 LB_UN_0_SHUN_0_OPCODE_X1 = 10, 143 FDOUBLE_UNPACK_MAX_RRR_0_OPCODE_X0 = 32,
144 LB_U_OPCODE_Y2 = 1, 144 FDOUBLE_UNPACK_MIN_RRR_0_OPCODE_X0 = 33,
145 LB_U_UN_0_SHUN_0_OPCODE_X1 = 11, 145 FETCHADD4_RRR_0_OPCODE_X1 = 18,
146 LHADD_IMM_0_OPCODE_X1 = 24, 146 FETCHADDGEZ4_RRR_0_OPCODE_X1 = 19,
147 LHADD_U_IMM_0_OPCODE_X1 = 25, 147 FETCHADDGEZ_RRR_0_OPCODE_X1 = 20,
148 LH_OPCODE_Y2 = 2, 148 FETCHADD_RRR_0_OPCODE_X1 = 21,
149 LH_UN_0_SHUN_0_OPCODE_X1 = 12, 149 FETCHAND4_RRR_0_OPCODE_X1 = 22,
150 LH_U_OPCODE_Y2 = 3, 150 FETCHAND_RRR_0_OPCODE_X1 = 23,
151 LH_U_UN_0_SHUN_0_OPCODE_X1 = 13, 151 FETCHOR4_RRR_0_OPCODE_X1 = 24,
152 LNK_SPECIAL_0_OPCODE_X1 = 13, 152 FETCHOR_RRR_0_OPCODE_X1 = 25,
153 LWADD_IMM_0_OPCODE_X1 = 26, 153 FINV_UNARY_OPCODE_X1 = 3,
154 LWADD_NA_IMM_0_OPCODE_X1 = 27, 154 FLUSHWB_UNARY_OPCODE_X1 = 4,
155 LW_NA_UN_0_SHUN_0_OPCODE_X1 = 24, 155 FLUSH_UNARY_OPCODE_X1 = 5,
156 LW_OPCODE_Y2 = 4, 156 FNOP_UNARY_OPCODE_X0 = 3,
157 LW_UN_0_SHUN_0_OPCODE_X1 = 14, 157 FNOP_UNARY_OPCODE_X1 = 6,
158 MAXB_U_SPECIAL_0_OPCODE_X0 = 15, 158 FNOP_UNARY_OPCODE_Y0 = 3,
159 MAXB_U_SPECIAL_0_OPCODE_X1 = 14, 159 FNOP_UNARY_OPCODE_Y1 = 8,
160 MAXH_SPECIAL_0_OPCODE_X0 = 16, 160 FSINGLE_ADD1_RRR_0_OPCODE_X0 = 34,
161 MAXH_SPECIAL_0_OPCODE_X1 = 15, 161 FSINGLE_ADDSUB2_RRR_0_OPCODE_X0 = 35,
162 MAXIB_U_IMM_0_OPCODE_X0 = 4, 162 FSINGLE_MUL1_RRR_0_OPCODE_X0 = 36,
163 MAXIB_U_IMM_0_OPCODE_X1 = 5, 163 FSINGLE_MUL2_RRR_0_OPCODE_X0 = 37,
164 MAXIH_IMM_0_OPCODE_X0 = 5, 164 FSINGLE_PACK1_UNARY_OPCODE_X0 = 4,
165 MAXIH_IMM_0_OPCODE_X1 = 6, 165 FSINGLE_PACK1_UNARY_OPCODE_Y0 = 4,
166 MFSPR_IMM_0_OPCODE_X1 = 7, 166 FSINGLE_PACK2_RRR_0_OPCODE_X0 = 38,
167 MF_UN_0_SHUN_0_OPCODE_X1 = 15, 167 FSINGLE_SUB1_RRR_0_OPCODE_X0 = 39,
168 MINB_U_SPECIAL_0_OPCODE_X0 = 17, 168 ICOH_UNARY_OPCODE_X1 = 7,
169 MINB_U_SPECIAL_0_OPCODE_X1 = 16, 169 ILL_UNARY_OPCODE_X1 = 8,
170 MINH_SPECIAL_0_OPCODE_X0 = 18, 170 ILL_UNARY_OPCODE_Y1 = 9,
171 MINH_SPECIAL_0_OPCODE_X1 = 17, 171 IMM8_OPCODE_X0 = 4,
172 MINIB_U_IMM_0_OPCODE_X0 = 6, 172 IMM8_OPCODE_X1 = 3,
173 MINIB_U_IMM_0_OPCODE_X1 = 8, 173 INV_UNARY_OPCODE_X1 = 9,
174 MINIH_IMM_0_OPCODE_X0 = 7, 174 IRET_UNARY_OPCODE_X1 = 10,
175 MINIH_IMM_0_OPCODE_X1 = 9, 175 JALRP_UNARY_OPCODE_X1 = 11,
176 MM_OPCODE_X0 = 6, 176 JALRP_UNARY_OPCODE_Y1 = 10,
177 MM_OPCODE_X1 = 7, 177 JALR_UNARY_OPCODE_X1 = 12,
178 MNZB_SPECIAL_0_OPCODE_X0 = 19, 178 JALR_UNARY_OPCODE_Y1 = 11,
179 MNZB_SPECIAL_0_OPCODE_X1 = 18, 179 JAL_JUMP_OPCODE_X1 = 0,
180 MNZH_SPECIAL_0_OPCODE_X0 = 20, 180 JRP_UNARY_OPCODE_X1 = 13,
181 MNZH_SPECIAL_0_OPCODE_X1 = 19, 181 JRP_UNARY_OPCODE_Y1 = 12,
182 MNZ_SPECIAL_0_OPCODE_X0 = 21, 182 JR_UNARY_OPCODE_X1 = 14,
183 MNZ_SPECIAL_0_OPCODE_X1 = 20, 183 JR_UNARY_OPCODE_Y1 = 13,
184 MNZ_SPECIAL_1_OPCODE_Y0 = 0, 184 JUMP_OPCODE_X1 = 4,
185 MNZ_SPECIAL_1_OPCODE_Y1 = 1, 185 J_JUMP_OPCODE_X1 = 1,
186 MOVEI_IMM_1_OPCODE_SN = 0, 186 LD1S_ADD_IMM8_OPCODE_X1 = 7,
187 MOVE_RR_IMM_0_OPCODE_SN = 8, 187 LD1S_OPCODE_Y2 = 0,
188 MTSPR_IMM_0_OPCODE_X1 = 10, 188 LD1S_UNARY_OPCODE_X1 = 15,
189 MULHHA_SS_SPECIAL_0_OPCODE_X0 = 22, 189 LD1U_ADD_IMM8_OPCODE_X1 = 8,
190 MULHHA_SS_SPECIAL_7_OPCODE_Y0 = 0, 190 LD1U_OPCODE_Y2 = 1,
191 MULHHA_SU_SPECIAL_0_OPCODE_X0 = 23, 191 LD1U_UNARY_OPCODE_X1 = 16,
192 MULHHA_UU_SPECIAL_0_OPCODE_X0 = 24, 192 LD2S_ADD_IMM8_OPCODE_X1 = 9,
193 MULHHA_UU_SPECIAL_7_OPCODE_Y0 = 1, 193 LD2S_OPCODE_Y2 = 2,
194 MULHHSA_UU_SPECIAL_0_OPCODE_X0 = 25, 194 LD2S_UNARY_OPCODE_X1 = 17,
195 MULHH_SS_SPECIAL_0_OPCODE_X0 = 26, 195 LD2U_ADD_IMM8_OPCODE_X1 = 10,
196 MULHH_SS_SPECIAL_6_OPCODE_Y0 = 0, 196 LD2U_OPCODE_Y2 = 3,
197 MULHH_SU_SPECIAL_0_OPCODE_X0 = 27, 197 LD2U_UNARY_OPCODE_X1 = 18,
198 MULHH_UU_SPECIAL_0_OPCODE_X0 = 28, 198 LD4S_ADD_IMM8_OPCODE_X1 = 11,
199 MULHH_UU_SPECIAL_6_OPCODE_Y0 = 1, 199 LD4S_OPCODE_Y2 = 1,
200 MULHLA_SS_SPECIAL_0_OPCODE_X0 = 29, 200 LD4S_UNARY_OPCODE_X1 = 19,
201 MULHLA_SU_SPECIAL_0_OPCODE_X0 = 30, 201 LD4U_ADD_IMM8_OPCODE_X1 = 12,
202 MULHLA_US_SPECIAL_0_OPCODE_X0 = 31, 202 LD4U_OPCODE_Y2 = 2,
203 MULHLA_UU_SPECIAL_0_OPCODE_X0 = 32, 203 LD4U_UNARY_OPCODE_X1 = 20,
204 MULHLSA_UU_SPECIAL_0_OPCODE_X0 = 33, 204 LDNA_UNARY_OPCODE_X1 = 21,
205 MULHLSA_UU_SPECIAL_5_OPCODE_Y0 = 0, 205 LDNT1S_ADD_IMM8_OPCODE_X1 = 13,
206 MULHL_SS_SPECIAL_0_OPCODE_X0 = 34, 206 LDNT1S_UNARY_OPCODE_X1 = 22,
207 MULHL_SU_SPECIAL_0_OPCODE_X0 = 35, 207 LDNT1U_ADD_IMM8_OPCODE_X1 = 14,
208 MULHL_US_SPECIAL_0_OPCODE_X0 = 36, 208 LDNT1U_UNARY_OPCODE_X1 = 23,
209 MULHL_UU_SPECIAL_0_OPCODE_X0 = 37, 209 LDNT2S_ADD_IMM8_OPCODE_X1 = 15,
210 MULLLA_SS_SPECIAL_0_OPCODE_X0 = 38, 210 LDNT2S_UNARY_OPCODE_X1 = 24,
211 MULLLA_SS_SPECIAL_7_OPCODE_Y0 = 2, 211 LDNT2U_ADD_IMM8_OPCODE_X1 = 16,
212 MULLLA_SU_SPECIAL_0_OPCODE_X0 = 39, 212 LDNT2U_UNARY_OPCODE_X1 = 25,
213 MULLLA_UU_SPECIAL_0_OPCODE_X0 = 40, 213 LDNT4S_ADD_IMM8_OPCODE_X1 = 17,
214 MULLLA_UU_SPECIAL_7_OPCODE_Y0 = 3, 214 LDNT4S_UNARY_OPCODE_X1 = 26,
215 MULLLSA_UU_SPECIAL_0_OPCODE_X0 = 41, 215 LDNT4U_ADD_IMM8_OPCODE_X1 = 18,
216 MULLL_SS_SPECIAL_0_OPCODE_X0 = 42, 216 LDNT4U_UNARY_OPCODE_X1 = 27,
217 MULLL_SS_SPECIAL_6_OPCODE_Y0 = 2, 217 LDNT_ADD_IMM8_OPCODE_X1 = 19,
218 MULLL_SU_SPECIAL_0_OPCODE_X0 = 43, 218 LDNT_UNARY_OPCODE_X1 = 28,
219 MULLL_UU_SPECIAL_0_OPCODE_X0 = 44, 219 LD_ADD_IMM8_OPCODE_X1 = 20,
220 MULLL_UU_SPECIAL_6_OPCODE_Y0 = 3, 220 LD_OPCODE_Y2 = 3,
221 MVNZ_SPECIAL_0_OPCODE_X0 = 45, 221 LD_UNARY_OPCODE_X1 = 29,
222 MVNZ_SPECIAL_1_OPCODE_Y0 = 1, 222 LNK_UNARY_OPCODE_X1 = 30,
223 MVZ_SPECIAL_0_OPCODE_X0 = 46, 223 LNK_UNARY_OPCODE_Y1 = 14,
224 MVZ_SPECIAL_1_OPCODE_Y0 = 2, 224 LWNA_ADD_IMM8_OPCODE_X1 = 21,
225 MZB_SPECIAL_0_OPCODE_X0 = 47, 225 MFSPR_IMM8_OPCODE_X1 = 22,
226 MZB_SPECIAL_0_OPCODE_X1 = 21, 226 MF_UNARY_OPCODE_X1 = 31,
227 MZH_SPECIAL_0_OPCODE_X0 = 48, 227 MM_BF_OPCODE_X0 = 7,
228 MZH_SPECIAL_0_OPCODE_X1 = 22, 228 MNZ_RRR_0_OPCODE_X0 = 40,
229 MZ_SPECIAL_0_OPCODE_X0 = 49, 229 MNZ_RRR_0_OPCODE_X1 = 26,
230 MZ_SPECIAL_0_OPCODE_X1 = 23, 230 MNZ_RRR_4_OPCODE_Y0 = 2,
231 MZ_SPECIAL_1_OPCODE_Y0 = 3, 231 MNZ_RRR_4_OPCODE_Y1 = 2,
232 MZ_SPECIAL_1_OPCODE_Y1 = 2, 232 MODE_OPCODE_YA2 = 1,
233 NAP_UN_0_SHUN_0_OPCODE_X1 = 16, 233 MODE_OPCODE_YB2 = 2,
234 NOP_NOREG_RR_IMM_0_OPCODE_SN = 2, 234 MODE_OPCODE_YC2 = 3,
235 NOP_UN_0_SHUN_0_OPCODE_X0 = 6, 235 MTSPR_IMM8_OPCODE_X1 = 23,
236 NOP_UN_0_SHUN_0_OPCODE_X1 = 17, 236 MULAX_RRR_0_OPCODE_X0 = 41,
237 NOP_UN_0_SHUN_0_OPCODE_Y0 = 6, 237 MULAX_RRR_3_OPCODE_Y0 = 2,
238 NOP_UN_0_SHUN_0_OPCODE_Y1 = 3, 238 MULA_HS_HS_RRR_0_OPCODE_X0 = 42,
239 NOREG_RR_IMM_0_OPCODE_SN = 0, 239 MULA_HS_HS_RRR_9_OPCODE_Y0 = 0,
240 NOR_SPECIAL_0_OPCODE_X0 = 50, 240 MULA_HS_HU_RRR_0_OPCODE_X0 = 43,
241 NOR_SPECIAL_0_OPCODE_X1 = 24, 241 MULA_HS_LS_RRR_0_OPCODE_X0 = 44,
242 NOR_SPECIAL_2_OPCODE_Y0 = 1, 242 MULA_HS_LU_RRR_0_OPCODE_X0 = 45,
243 NOR_SPECIAL_2_OPCODE_Y1 = 1, 243 MULA_HU_HU_RRR_0_OPCODE_X0 = 46,
244 ORI_IMM_0_OPCODE_X0 = 8, 244 MULA_HU_HU_RRR_9_OPCODE_Y0 = 1,
245 ORI_IMM_0_OPCODE_X1 = 11, 245 MULA_HU_LS_RRR_0_OPCODE_X0 = 47,
246 ORI_OPCODE_Y0 = 11, 246 MULA_HU_LU_RRR_0_OPCODE_X0 = 48,
247 ORI_OPCODE_Y1 = 9, 247 MULA_LS_LS_RRR_0_OPCODE_X0 = 49,
248 OR_SPECIAL_0_OPCODE_X0 = 51, 248 MULA_LS_LS_RRR_9_OPCODE_Y0 = 2,
249 OR_SPECIAL_0_OPCODE_X1 = 25, 249 MULA_LS_LU_RRR_0_OPCODE_X0 = 50,
250 OR_SPECIAL_2_OPCODE_Y0 = 2, 250 MULA_LU_LU_RRR_0_OPCODE_X0 = 51,
251 OR_SPECIAL_2_OPCODE_Y1 = 2, 251 MULA_LU_LU_RRR_9_OPCODE_Y0 = 3,
252 PACKBS_U_SPECIAL_0_OPCODE_X0 = 103, 252 MULX_RRR_0_OPCODE_X0 = 52,
253 PACKBS_U_SPECIAL_0_OPCODE_X1 = 73, 253 MULX_RRR_3_OPCODE_Y0 = 3,
254 PACKHB_SPECIAL_0_OPCODE_X0 = 52, 254 MUL_HS_HS_RRR_0_OPCODE_X0 = 53,
255 PACKHB_SPECIAL_0_OPCODE_X1 = 26, 255 MUL_HS_HS_RRR_8_OPCODE_Y0 = 0,
256 PACKHS_SPECIAL_0_OPCODE_X0 = 102, 256 MUL_HS_HU_RRR_0_OPCODE_X0 = 54,
257 PACKHS_SPECIAL_0_OPCODE_X1 = 72, 257 MUL_HS_LS_RRR_0_OPCODE_X0 = 55,
258 PACKLB_SPECIAL_0_OPCODE_X0 = 53, 258 MUL_HS_LU_RRR_0_OPCODE_X0 = 56,
259 PACKLB_SPECIAL_0_OPCODE_X1 = 27, 259 MUL_HU_HU_RRR_0_OPCODE_X0 = 57,
260 PCNT_UN_0_SHUN_0_OPCODE_X0 = 7, 260 MUL_HU_HU_RRR_8_OPCODE_Y0 = 1,
261 PCNT_UN_0_SHUN_0_OPCODE_Y0 = 7, 261 MUL_HU_LS_RRR_0_OPCODE_X0 = 58,
262 RLI_SHUN_0_OPCODE_X0 = 1, 262 MUL_HU_LU_RRR_0_OPCODE_X0 = 59,
263 RLI_SHUN_0_OPCODE_X1 = 1, 263 MUL_LS_LS_RRR_0_OPCODE_X0 = 60,
264 RLI_SHUN_0_OPCODE_Y0 = 1, 264 MUL_LS_LS_RRR_8_OPCODE_Y0 = 2,
265 RLI_SHUN_0_OPCODE_Y1 = 1, 265 MUL_LS_LU_RRR_0_OPCODE_X0 = 61,
266 RL_SPECIAL_0_OPCODE_X0 = 54, 266 MUL_LU_LU_RRR_0_OPCODE_X0 = 62,
267 RL_SPECIAL_0_OPCODE_X1 = 28, 267 MUL_LU_LU_RRR_8_OPCODE_Y0 = 3,
268 RL_SPECIAL_3_OPCODE_Y0 = 0, 268 MZ_RRR_0_OPCODE_X0 = 63,
269 RL_SPECIAL_3_OPCODE_Y1 = 0, 269 MZ_RRR_0_OPCODE_X1 = 27,
270 RR_IMM_0_OPCODE_SN = 0, 270 MZ_RRR_4_OPCODE_Y0 = 3,
271 S1A_SPECIAL_0_OPCODE_X0 = 55, 271 MZ_RRR_4_OPCODE_Y1 = 3,
272 S1A_SPECIAL_0_OPCODE_X1 = 29, 272 NAP_UNARY_OPCODE_X1 = 32,
273 S1A_SPECIAL_0_OPCODE_Y0 = 1, 273 NOP_UNARY_OPCODE_X0 = 5,
274 S1A_SPECIAL_0_OPCODE_Y1 = 1, 274 NOP_UNARY_OPCODE_X1 = 33,
275 S2A_SPECIAL_0_OPCODE_X0 = 56, 275 NOP_UNARY_OPCODE_Y0 = 5,
276 S2A_SPECIAL_0_OPCODE_X1 = 30, 276 NOP_UNARY_OPCODE_Y1 = 15,
277 S2A_SPECIAL_0_OPCODE_Y0 = 2, 277 NOR_RRR_0_OPCODE_X0 = 64,
278 S2A_SPECIAL_0_OPCODE_Y1 = 2, 278 NOR_RRR_0_OPCODE_X1 = 28,
279 S3A_SPECIAL_0_OPCODE_X0 = 57, 279 NOR_RRR_5_OPCODE_Y0 = 1,
280 S3A_SPECIAL_0_OPCODE_X1 = 31, 280 NOR_RRR_5_OPCODE_Y1 = 1,
281 S3A_SPECIAL_5_OPCODE_Y0 = 1, 281 ORI_IMM8_OPCODE_X0 = 7,
282 S3A_SPECIAL_5_OPCODE_Y1 = 1, 282 ORI_IMM8_OPCODE_X1 = 24,
283 SADAB_U_SPECIAL_0_OPCODE_X0 = 58, 283 OR_RRR_0_OPCODE_X0 = 65,
284 SADAH_SPECIAL_0_OPCODE_X0 = 59, 284 OR_RRR_0_OPCODE_X1 = 29,
285 SADAH_U_SPECIAL_0_OPCODE_X0 = 60, 285 OR_RRR_5_OPCODE_Y0 = 2,
286 SADB_U_SPECIAL_0_OPCODE_X0 = 61, 286 OR_RRR_5_OPCODE_Y1 = 2,
287 SADH_SPECIAL_0_OPCODE_X0 = 62, 287 PCNT_UNARY_OPCODE_X0 = 6,
288 SADH_U_SPECIAL_0_OPCODE_X0 = 63, 288 PCNT_UNARY_OPCODE_Y0 = 6,
289 SBADD_IMM_0_OPCODE_X1 = 28, 289 REVBITS_UNARY_OPCODE_X0 = 7,
290 SB_OPCODE_Y2 = 5, 290 REVBITS_UNARY_OPCODE_Y0 = 7,
291 SB_SPECIAL_0_OPCODE_X1 = 32, 291 REVBYTES_UNARY_OPCODE_X0 = 8,
292 SEQB_SPECIAL_0_OPCODE_X0 = 64, 292 REVBYTES_UNARY_OPCODE_Y0 = 8,
293 SEQB_SPECIAL_0_OPCODE_X1 = 33, 293 ROTLI_SHIFT_OPCODE_X0 = 1,
294 SEQH_SPECIAL_0_OPCODE_X0 = 65, 294 ROTLI_SHIFT_OPCODE_X1 = 1,
295 SEQH_SPECIAL_0_OPCODE_X1 = 34, 295 ROTLI_SHIFT_OPCODE_Y0 = 0,
296 SEQIB_IMM_0_OPCODE_X0 = 9, 296 ROTLI_SHIFT_OPCODE_Y1 = 0,
297 SEQIB_IMM_0_OPCODE_X1 = 12, 297 ROTL_RRR_0_OPCODE_X0 = 66,
298 SEQIH_IMM_0_OPCODE_X0 = 10, 298 ROTL_RRR_0_OPCODE_X1 = 30,
299 SEQIH_IMM_0_OPCODE_X1 = 13, 299 ROTL_RRR_6_OPCODE_Y0 = 0,
300 SEQI_IMM_0_OPCODE_X0 = 11, 300 ROTL_RRR_6_OPCODE_Y1 = 0,
301 SEQI_IMM_0_OPCODE_X1 = 14, 301 RRR_0_OPCODE_X0 = 5,
302 SEQI_OPCODE_Y0 = 12, 302 RRR_0_OPCODE_X1 = 5,
303 SEQI_OPCODE_Y1 = 10, 303 RRR_0_OPCODE_Y0 = 5,
304 SEQ_SPECIAL_0_OPCODE_X0 = 66, 304 RRR_0_OPCODE_Y1 = 6,
305 SEQ_SPECIAL_0_OPCODE_X1 = 35, 305 RRR_1_OPCODE_Y0 = 6,
306 SEQ_SPECIAL_5_OPCODE_Y0 = 2, 306 RRR_1_OPCODE_Y1 = 7,
307 SEQ_SPECIAL_5_OPCODE_Y1 = 2, 307 RRR_2_OPCODE_Y0 = 7,
308 SHADD_IMM_0_OPCODE_X1 = 29, 308 RRR_2_OPCODE_Y1 = 8,
309 SHL8II_IMM_0_OPCODE_SN = 3, 309 RRR_3_OPCODE_Y0 = 8,
310 SHLB_SPECIAL_0_OPCODE_X0 = 67, 310 RRR_3_OPCODE_Y1 = 9,
311 SHLB_SPECIAL_0_OPCODE_X1 = 36, 311 RRR_4_OPCODE_Y0 = 9,
312 SHLH_SPECIAL_0_OPCODE_X0 = 68, 312 RRR_4_OPCODE_Y1 = 10,
313 SHLH_SPECIAL_0_OPCODE_X1 = 37, 313 RRR_5_OPCODE_Y0 = 10,
314 SHLIB_SHUN_0_OPCODE_X0 = 2, 314 RRR_5_OPCODE_Y1 = 11,
315 SHLIB_SHUN_0_OPCODE_X1 = 2, 315 RRR_6_OPCODE_Y0 = 11,
316 SHLIH_SHUN_0_OPCODE_X0 = 3, 316 RRR_6_OPCODE_Y1 = 12,
317 SHLIH_SHUN_0_OPCODE_X1 = 3, 317 RRR_7_OPCODE_Y0 = 12,
318 SHLI_SHUN_0_OPCODE_X0 = 4, 318 RRR_7_OPCODE_Y1 = 13,
319 SHLI_SHUN_0_OPCODE_X1 = 4, 319 RRR_8_OPCODE_Y0 = 13,
320 SHLI_SHUN_0_OPCODE_Y0 = 2, 320 RRR_9_OPCODE_Y0 = 14,
321 SHLI_SHUN_0_OPCODE_Y1 = 2, 321 SHIFT_OPCODE_X0 = 6,
322 SHL_SPECIAL_0_OPCODE_X0 = 69, 322 SHIFT_OPCODE_X1 = 6,
323 SHL_SPECIAL_0_OPCODE_X1 = 38, 323 SHIFT_OPCODE_Y0 = 15,
324 SHL_SPECIAL_3_OPCODE_Y0 = 1, 324 SHIFT_OPCODE_Y1 = 14,
325 SHL_SPECIAL_3_OPCODE_Y1 = 1, 325 SHL16INSLI_OPCODE_X0 = 7,
326 SHR1_RR_IMM_0_OPCODE_SN = 9, 326 SHL16INSLI_OPCODE_X1 = 7,
327 SHRB_SPECIAL_0_OPCODE_X0 = 70, 327 SHL1ADDX_RRR_0_OPCODE_X0 = 67,
328 SHRB_SPECIAL_0_OPCODE_X1 = 39, 328 SHL1ADDX_RRR_0_OPCODE_X1 = 31,
329 SHRH_SPECIAL_0_OPCODE_X0 = 71, 329 SHL1ADDX_RRR_7_OPCODE_Y0 = 1,
330 SHRH_SPECIAL_0_OPCODE_X1 = 40, 330 SHL1ADDX_RRR_7_OPCODE_Y1 = 1,
331 SHRIB_SHUN_0_OPCODE_X0 = 5, 331 SHL1ADD_RRR_0_OPCODE_X0 = 68,
332 SHRIB_SHUN_0_OPCODE_X1 = 5, 332 SHL1ADD_RRR_0_OPCODE_X1 = 32,
333 SHRIH_SHUN_0_OPCODE_X0 = 6, 333 SHL1ADD_RRR_1_OPCODE_Y0 = 0,
334 SHRIH_SHUN_0_OPCODE_X1 = 6, 334 SHL1ADD_RRR_1_OPCODE_Y1 = 0,
335 SHRI_SHUN_0_OPCODE_X0 = 7, 335 SHL2ADDX_RRR_0_OPCODE_X0 = 69,
336 SHRI_SHUN_0_OPCODE_X1 = 7, 336 SHL2ADDX_RRR_0_OPCODE_X1 = 33,
337 SHRI_SHUN_0_OPCODE_Y0 = 3, 337 SHL2ADDX_RRR_7_OPCODE_Y0 = 2,
338 SHRI_SHUN_0_OPCODE_Y1 = 3, 338 SHL2ADDX_RRR_7_OPCODE_Y1 = 2,
339 SHR_SPECIAL_0_OPCODE_X0 = 72, 339 SHL2ADD_RRR_0_OPCODE_X0 = 70,
340 SHR_SPECIAL_0_OPCODE_X1 = 41, 340 SHL2ADD_RRR_0_OPCODE_X1 = 34,
341 SHR_SPECIAL_3_OPCODE_Y0 = 2, 341 SHL2ADD_RRR_1_OPCODE_Y0 = 1,
342 SHR_SPECIAL_3_OPCODE_Y1 = 2, 342 SHL2ADD_RRR_1_OPCODE_Y1 = 1,
343 SHUN_0_OPCODE_X0 = 7, 343 SHL3ADDX_RRR_0_OPCODE_X0 = 71,
344 SHUN_0_OPCODE_X1 = 8, 344 SHL3ADDX_RRR_0_OPCODE_X1 = 35,
345 SHUN_0_OPCODE_Y0 = 13, 345 SHL3ADDX_RRR_7_OPCODE_Y0 = 3,
346 SHUN_0_OPCODE_Y1 = 11, 346 SHL3ADDX_RRR_7_OPCODE_Y1 = 3,
347 SH_OPCODE_Y2 = 6, 347 SHL3ADD_RRR_0_OPCODE_X0 = 72,
348 SH_SPECIAL_0_OPCODE_X1 = 42, 348 SHL3ADD_RRR_0_OPCODE_X1 = 36,
349 SLTB_SPECIAL_0_OPCODE_X0 = 73, 349 SHL3ADD_RRR_1_OPCODE_Y0 = 2,
350 SLTB_SPECIAL_0_OPCODE_X1 = 43, 350 SHL3ADD_RRR_1_OPCODE_Y1 = 2,
351 SLTB_U_SPECIAL_0_OPCODE_X0 = 74, 351 SHLI_SHIFT_OPCODE_X0 = 2,
352 SLTB_U_SPECIAL_0_OPCODE_X1 = 44, 352 SHLI_SHIFT_OPCODE_X1 = 2,
353 SLTEB_SPECIAL_0_OPCODE_X0 = 75, 353 SHLI_SHIFT_OPCODE_Y0 = 1,
354 SLTEB_SPECIAL_0_OPCODE_X1 = 45, 354 SHLI_SHIFT_OPCODE_Y1 = 1,
355 SLTEB_U_SPECIAL_0_OPCODE_X0 = 76, 355 SHLXI_SHIFT_OPCODE_X0 = 3,
356 SLTEB_U_SPECIAL_0_OPCODE_X1 = 46, 356 SHLXI_SHIFT_OPCODE_X1 = 3,
357 SLTEH_SPECIAL_0_OPCODE_X0 = 77, 357 SHLX_RRR_0_OPCODE_X0 = 73,
358 SLTEH_SPECIAL_0_OPCODE_X1 = 47, 358 SHLX_RRR_0_OPCODE_X1 = 37,
359 SLTEH_U_SPECIAL_0_OPCODE_X0 = 78, 359 SHL_RRR_0_OPCODE_X0 = 74,
360 SLTEH_U_SPECIAL_0_OPCODE_X1 = 48, 360 SHL_RRR_0_OPCODE_X1 = 38,
361 SLTE_SPECIAL_0_OPCODE_X0 = 79, 361 SHL_RRR_6_OPCODE_Y0 = 1,
362 SLTE_SPECIAL_0_OPCODE_X1 = 49, 362 SHL_RRR_6_OPCODE_Y1 = 1,
363 SLTE_SPECIAL_4_OPCODE_Y0 = 0, 363 SHRSI_SHIFT_OPCODE_X0 = 4,
364 SLTE_SPECIAL_4_OPCODE_Y1 = 0, 364 SHRSI_SHIFT_OPCODE_X1 = 4,
365 SLTE_U_SPECIAL_0_OPCODE_X0 = 80, 365 SHRSI_SHIFT_OPCODE_Y0 = 2,
366 SLTE_U_SPECIAL_0_OPCODE_X1 = 50, 366 SHRSI_SHIFT_OPCODE_Y1 = 2,
367 SLTE_U_SPECIAL_4_OPCODE_Y0 = 1, 367 SHRS_RRR_0_OPCODE_X0 = 75,
368 SLTE_U_SPECIAL_4_OPCODE_Y1 = 1, 368 SHRS_RRR_0_OPCODE_X1 = 39,
369 SLTH_SPECIAL_0_OPCODE_X0 = 81, 369 SHRS_RRR_6_OPCODE_Y0 = 2,
370 SLTH_SPECIAL_0_OPCODE_X1 = 51, 370 SHRS_RRR_6_OPCODE_Y1 = 2,
371 SLTH_U_SPECIAL_0_OPCODE_X0 = 82, 371 SHRUI_SHIFT_OPCODE_X0 = 5,
372 SLTH_U_SPECIAL_0_OPCODE_X1 = 52, 372 SHRUI_SHIFT_OPCODE_X1 = 5,
373 SLTIB_IMM_0_OPCODE_X0 = 12, 373 SHRUI_SHIFT_OPCODE_Y0 = 3,
374 SLTIB_IMM_0_OPCODE_X1 = 15, 374 SHRUI_SHIFT_OPCODE_Y1 = 3,
375 SLTIB_U_IMM_0_OPCODE_X0 = 13, 375 SHRUXI_SHIFT_OPCODE_X0 = 6,
376 SLTIB_U_IMM_0_OPCODE_X1 = 16, 376 SHRUXI_SHIFT_OPCODE_X1 = 6,
377 SLTIH_IMM_0_OPCODE_X0 = 14, 377 SHRUX_RRR_0_OPCODE_X0 = 76,
378 SLTIH_IMM_0_OPCODE_X1 = 17, 378 SHRUX_RRR_0_OPCODE_X1 = 40,
379 SLTIH_U_IMM_0_OPCODE_X0 = 15, 379 SHRU_RRR_0_OPCODE_X0 = 77,
380 SLTIH_U_IMM_0_OPCODE_X1 = 18, 380 SHRU_RRR_0_OPCODE_X1 = 41,
381 SLTI_IMM_0_OPCODE_X0 = 16, 381 SHRU_RRR_6_OPCODE_Y0 = 3,
382 SLTI_IMM_0_OPCODE_X1 = 19, 382 SHRU_RRR_6_OPCODE_Y1 = 3,
383 SLTI_OPCODE_Y0 = 14, 383 SHUFFLEBYTES_RRR_0_OPCODE_X0 = 78,
384 SLTI_OPCODE_Y1 = 12, 384 ST1_ADD_IMM8_OPCODE_X1 = 25,
385 SLTI_U_IMM_0_OPCODE_X0 = 17, 385 ST1_OPCODE_Y2 = 0,
386 SLTI_U_IMM_0_OPCODE_X1 = 20, 386 ST1_RRR_0_OPCODE_X1 = 42,
387 SLTI_U_OPCODE_Y0 = 15, 387 ST2_ADD_IMM8_OPCODE_X1 = 26,
388 SLTI_U_OPCODE_Y1 = 13, 388 ST2_OPCODE_Y2 = 1,
389 SLT_SPECIAL_0_OPCODE_X0 = 83, 389 ST2_RRR_0_OPCODE_X1 = 43,
390 SLT_SPECIAL_0_OPCODE_X1 = 53, 390 ST4_ADD_IMM8_OPCODE_X1 = 27,
391 SLT_SPECIAL_4_OPCODE_Y0 = 2, 391 ST4_OPCODE_Y2 = 2,
392 SLT_SPECIAL_4_OPCODE_Y1 = 2, 392 ST4_RRR_0_OPCODE_X1 = 44,
393 SLT_U_SPECIAL_0_OPCODE_X0 = 84, 393 STNT1_ADD_IMM8_OPCODE_X1 = 28,
394 SLT_U_SPECIAL_0_OPCODE_X1 = 54, 394 STNT1_RRR_0_OPCODE_X1 = 45,
395 SLT_U_SPECIAL_4_OPCODE_Y0 = 3, 395 STNT2_ADD_IMM8_OPCODE_X1 = 29,
396 SLT_U_SPECIAL_4_OPCODE_Y1 = 3, 396 STNT2_RRR_0_OPCODE_X1 = 46,
397 SNEB_SPECIAL_0_OPCODE_X0 = 85, 397 STNT4_ADD_IMM8_OPCODE_X1 = 30,
398 SNEB_SPECIAL_0_OPCODE_X1 = 55, 398 STNT4_RRR_0_OPCODE_X1 = 47,
399 SNEH_SPECIAL_0_OPCODE_X0 = 86, 399 STNT_ADD_IMM8_OPCODE_X1 = 31,
400 SNEH_SPECIAL_0_OPCODE_X1 = 56, 400 STNT_RRR_0_OPCODE_X1 = 48,
401 SNE_SPECIAL_0_OPCODE_X0 = 87, 401 ST_ADD_IMM8_OPCODE_X1 = 32,
402 SNE_SPECIAL_0_OPCODE_X1 = 57, 402 ST_OPCODE_Y2 = 3,
403 SNE_SPECIAL_5_OPCODE_Y0 = 3, 403 ST_RRR_0_OPCODE_X1 = 49,
404 SNE_SPECIAL_5_OPCODE_Y1 = 3, 404 SUBXSC_RRR_0_OPCODE_X0 = 79,
405 SPECIAL_0_OPCODE_X0 = 0, 405 SUBXSC_RRR_0_OPCODE_X1 = 50,
406 SPECIAL_0_OPCODE_X1 = 1, 406 SUBX_RRR_0_OPCODE_X0 = 80,
407 SPECIAL_0_OPCODE_Y0 = 1, 407 SUBX_RRR_0_OPCODE_X1 = 51,
408 SPECIAL_0_OPCODE_Y1 = 1, 408 SUBX_RRR_0_OPCODE_Y0 = 2,
409 SPECIAL_1_OPCODE_Y0 = 2, 409 SUBX_RRR_0_OPCODE_Y1 = 2,
410 SPECIAL_1_OPCODE_Y1 = 2, 410 SUB_RRR_0_OPCODE_X0 = 81,
411 SPECIAL_2_OPCODE_Y0 = 3, 411 SUB_RRR_0_OPCODE_X1 = 52,
412 SPECIAL_2_OPCODE_Y1 = 3, 412 SUB_RRR_0_OPCODE_Y0 = 3,
413 SPECIAL_3_OPCODE_Y0 = 4, 413 SUB_RRR_0_OPCODE_Y1 = 3,
414 SPECIAL_3_OPCODE_Y1 = 4, 414 SWINT0_UNARY_OPCODE_X1 = 34,
415 SPECIAL_4_OPCODE_Y0 = 5, 415 SWINT1_UNARY_OPCODE_X1 = 35,
416 SPECIAL_4_OPCODE_Y1 = 5, 416 SWINT2_UNARY_OPCODE_X1 = 36,
417 SPECIAL_5_OPCODE_Y0 = 6, 417 SWINT3_UNARY_OPCODE_X1 = 37,
418 SPECIAL_5_OPCODE_Y1 = 6, 418 TBLIDXB0_UNARY_OPCODE_X0 = 9,
419 SPECIAL_6_OPCODE_Y0 = 7, 419 TBLIDXB0_UNARY_OPCODE_Y0 = 9,
420 SPECIAL_7_OPCODE_Y0 = 8, 420 TBLIDXB1_UNARY_OPCODE_X0 = 10,
421 SRAB_SPECIAL_0_OPCODE_X0 = 88, 421 TBLIDXB1_UNARY_OPCODE_Y0 = 10,
422 SRAB_SPECIAL_0_OPCODE_X1 = 58, 422 TBLIDXB2_UNARY_OPCODE_X0 = 11,
423 SRAH_SPECIAL_0_OPCODE_X0 = 89, 423 TBLIDXB2_UNARY_OPCODE_Y0 = 11,
424 SRAH_SPECIAL_0_OPCODE_X1 = 59, 424 TBLIDXB3_UNARY_OPCODE_X0 = 12,
425 SRAIB_SHUN_0_OPCODE_X0 = 8, 425 TBLIDXB3_UNARY_OPCODE_Y0 = 12,
426 SRAIB_SHUN_0_OPCODE_X1 = 8, 426 UNARY_RRR_0_OPCODE_X0 = 82,
427 SRAIH_SHUN_0_OPCODE_X0 = 9, 427 UNARY_RRR_0_OPCODE_X1 = 53,
428 SRAIH_SHUN_0_OPCODE_X1 = 9, 428 UNARY_RRR_1_OPCODE_Y0 = 3,
429 SRAI_SHUN_0_OPCODE_X0 = 10, 429 UNARY_RRR_1_OPCODE_Y1 = 3,
430 SRAI_SHUN_0_OPCODE_X1 = 10, 430 V1ADDI_IMM8_OPCODE_X0 = 8,
431 SRAI_SHUN_0_OPCODE_Y0 = 4, 431 V1ADDI_IMM8_OPCODE_X1 = 33,
432 SRAI_SHUN_0_OPCODE_Y1 = 4, 432 V1ADDUC_RRR_0_OPCODE_X0 = 83,
433 SRA_SPECIAL_0_OPCODE_X0 = 90, 433 V1ADDUC_RRR_0_OPCODE_X1 = 54,
434 SRA_SPECIAL_0_OPCODE_X1 = 60, 434 V1ADD_RRR_0_OPCODE_X0 = 84,
435 SRA_SPECIAL_3_OPCODE_Y0 = 3, 435 V1ADD_RRR_0_OPCODE_X1 = 55,
436 SRA_SPECIAL_3_OPCODE_Y1 = 3, 436 V1ADIFFU_RRR_0_OPCODE_X0 = 85,
437 SUBBS_U_SPECIAL_0_OPCODE_X0 = 100, 437 V1AVGU_RRR_0_OPCODE_X0 = 86,
438 SUBBS_U_SPECIAL_0_OPCODE_X1 = 70, 438 V1CMPEQI_IMM8_OPCODE_X0 = 9,
439 SUBB_SPECIAL_0_OPCODE_X0 = 91, 439 V1CMPEQI_IMM8_OPCODE_X1 = 34,
440 SUBB_SPECIAL_0_OPCODE_X1 = 61, 440 V1CMPEQ_RRR_0_OPCODE_X0 = 87,
441 SUBHS_SPECIAL_0_OPCODE_X0 = 101, 441 V1CMPEQ_RRR_0_OPCODE_X1 = 56,
442 SUBHS_SPECIAL_0_OPCODE_X1 = 71, 442 V1CMPLES_RRR_0_OPCODE_X0 = 88,
443 SUBH_SPECIAL_0_OPCODE_X0 = 92, 443 V1CMPLES_RRR_0_OPCODE_X1 = 57,
444 SUBH_SPECIAL_0_OPCODE_X1 = 62, 444 V1CMPLEU_RRR_0_OPCODE_X0 = 89,
445 SUBS_SPECIAL_0_OPCODE_X0 = 97, 445 V1CMPLEU_RRR_0_OPCODE_X1 = 58,
446 SUBS_SPECIAL_0_OPCODE_X1 = 67, 446 V1CMPLTSI_IMM8_OPCODE_X0 = 10,
447 SUB_SPECIAL_0_OPCODE_X0 = 93, 447 V1CMPLTSI_IMM8_OPCODE_X1 = 35,
448 SUB_SPECIAL_0_OPCODE_X1 = 63, 448 V1CMPLTS_RRR_0_OPCODE_X0 = 90,
449 SUB_SPECIAL_0_OPCODE_Y0 = 3, 449 V1CMPLTS_RRR_0_OPCODE_X1 = 59,
450 SUB_SPECIAL_0_OPCODE_Y1 = 3, 450 V1CMPLTUI_IMM8_OPCODE_X0 = 11,
451 SWADD_IMM_0_OPCODE_X1 = 30, 451 V1CMPLTUI_IMM8_OPCODE_X1 = 36,
452 SWINT0_UN_0_SHUN_0_OPCODE_X1 = 18, 452 V1CMPLTU_RRR_0_OPCODE_X0 = 91,
453 SWINT1_UN_0_SHUN_0_OPCODE_X1 = 19, 453 V1CMPLTU_RRR_0_OPCODE_X1 = 60,
454 SWINT2_UN_0_SHUN_0_OPCODE_X1 = 20, 454 V1CMPNE_RRR_0_OPCODE_X0 = 92,
455 SWINT3_UN_0_SHUN_0_OPCODE_X1 = 21, 455 V1CMPNE_RRR_0_OPCODE_X1 = 61,
456 SW_OPCODE_Y2 = 7, 456 V1DDOTPUA_RRR_0_OPCODE_X0 = 161,
457 SW_SPECIAL_0_OPCODE_X1 = 64, 457 V1DDOTPUSA_RRR_0_OPCODE_X0 = 93,
458 TBLIDXB0_UN_0_SHUN_0_OPCODE_X0 = 8, 458 V1DDOTPUS_RRR_0_OPCODE_X0 = 94,
459 TBLIDXB0_UN_0_SHUN_0_OPCODE_Y0 = 8, 459 V1DDOTPU_RRR_0_OPCODE_X0 = 162,
460 TBLIDXB1_UN_0_SHUN_0_OPCODE_X0 = 9, 460 V1DOTPA_RRR_0_OPCODE_X0 = 95,
461 TBLIDXB1_UN_0_SHUN_0_OPCODE_Y0 = 9, 461 V1DOTPUA_RRR_0_OPCODE_X0 = 163,
462 TBLIDXB2_UN_0_SHUN_0_OPCODE_X0 = 10, 462 V1DOTPUSA_RRR_0_OPCODE_X0 = 96,
463 TBLIDXB2_UN_0_SHUN_0_OPCODE_Y0 = 10, 463 V1DOTPUS_RRR_0_OPCODE_X0 = 97,
464 TBLIDXB3_UN_0_SHUN_0_OPCODE_X0 = 11, 464 V1DOTPU_RRR_0_OPCODE_X0 = 164,
465 TBLIDXB3_UN_0_SHUN_0_OPCODE_Y0 = 11, 465 V1DOTP_RRR_0_OPCODE_X0 = 98,
466 TNS_UN_0_SHUN_0_OPCODE_X1 = 22, 466 V1INT_H_RRR_0_OPCODE_X0 = 99,
467 UN_0_SHUN_0_OPCODE_X0 = 11, 467 V1INT_H_RRR_0_OPCODE_X1 = 62,
468 UN_0_SHUN_0_OPCODE_X1 = 11, 468 V1INT_L_RRR_0_OPCODE_X0 = 100,
469 UN_0_SHUN_0_OPCODE_Y0 = 5, 469 V1INT_L_RRR_0_OPCODE_X1 = 63,
470 UN_0_SHUN_0_OPCODE_Y1 = 5, 470 V1MAXUI_IMM8_OPCODE_X0 = 12,
471 WH64_UN_0_SHUN_0_OPCODE_X1 = 23, 471 V1MAXUI_IMM8_OPCODE_X1 = 37,
472 XORI_IMM_0_OPCODE_X0 = 2, 472 V1MAXU_RRR_0_OPCODE_X0 = 101,
473 XORI_IMM_0_OPCODE_X1 = 21, 473 V1MAXU_RRR_0_OPCODE_X1 = 64,
474 XOR_SPECIAL_0_OPCODE_X0 = 94, 474 V1MINUI_IMM8_OPCODE_X0 = 13,
475 XOR_SPECIAL_0_OPCODE_X1 = 65, 475 V1MINUI_IMM8_OPCODE_X1 = 38,
476 XOR_SPECIAL_2_OPCODE_Y0 = 3, 476 V1MINU_RRR_0_OPCODE_X0 = 102,
477 XOR_SPECIAL_2_OPCODE_Y1 = 3 477 V1MINU_RRR_0_OPCODE_X1 = 65,
478 V1MNZ_RRR_0_OPCODE_X0 = 103,
479 V1MNZ_RRR_0_OPCODE_X1 = 66,
480 V1MULTU_RRR_0_OPCODE_X0 = 104,
481 V1MULUS_RRR_0_OPCODE_X0 = 105,
482 V1MULU_RRR_0_OPCODE_X0 = 106,
483 V1MZ_RRR_0_OPCODE_X0 = 107,
484 V1MZ_RRR_0_OPCODE_X1 = 67,
485 V1SADAU_RRR_0_OPCODE_X0 = 108,
486 V1SADU_RRR_0_OPCODE_X0 = 109,
487 V1SHLI_SHIFT_OPCODE_X0 = 7,
488 V1SHLI_SHIFT_OPCODE_X1 = 7,
489 V1SHL_RRR_0_OPCODE_X0 = 110,
490 V1SHL_RRR_0_OPCODE_X1 = 68,
491 V1SHRSI_SHIFT_OPCODE_X0 = 8,
492 V1SHRSI_SHIFT_OPCODE_X1 = 8,
493 V1SHRS_RRR_0_OPCODE_X0 = 111,
494 V1SHRS_RRR_0_OPCODE_X1 = 69,
495 V1SHRUI_SHIFT_OPCODE_X0 = 9,
496 V1SHRUI_SHIFT_OPCODE_X1 = 9,
497 V1SHRU_RRR_0_OPCODE_X0 = 112,
498 V1SHRU_RRR_0_OPCODE_X1 = 70,
499 V1SUBUC_RRR_0_OPCODE_X0 = 113,
500 V1SUBUC_RRR_0_OPCODE_X1 = 71,
501 V1SUB_RRR_0_OPCODE_X0 = 114,
502 V1SUB_RRR_0_OPCODE_X1 = 72,
503 V2ADDI_IMM8_OPCODE_X0 = 14,
504 V2ADDI_IMM8_OPCODE_X1 = 39,
505 V2ADDSC_RRR_0_OPCODE_X0 = 115,
506 V2ADDSC_RRR_0_OPCODE_X1 = 73,
507 V2ADD_RRR_0_OPCODE_X0 = 116,
508 V2ADD_RRR_0_OPCODE_X1 = 74,
509 V2ADIFFS_RRR_0_OPCODE_X0 = 117,
510 V2AVGS_RRR_0_OPCODE_X0 = 118,
511 V2CMPEQI_IMM8_OPCODE_X0 = 15,
512 V2CMPEQI_IMM8_OPCODE_X1 = 40,
513 V2CMPEQ_RRR_0_OPCODE_X0 = 119,
514 V2CMPEQ_RRR_0_OPCODE_X1 = 75,
515 V2CMPLES_RRR_0_OPCODE_X0 = 120,
516 V2CMPLES_RRR_0_OPCODE_X1 = 76,
517 V2CMPLEU_RRR_0_OPCODE_X0 = 121,
518 V2CMPLEU_RRR_0_OPCODE_X1 = 77,
519 V2CMPLTSI_IMM8_OPCODE_X0 = 16,
520 V2CMPLTSI_IMM8_OPCODE_X1 = 41,
521 V2CMPLTS_RRR_0_OPCODE_X0 = 122,
522 V2CMPLTS_RRR_0_OPCODE_X1 = 78,
523 V2CMPLTUI_IMM8_OPCODE_X0 = 17,
524 V2CMPLTUI_IMM8_OPCODE_X1 = 42,
525 V2CMPLTU_RRR_0_OPCODE_X0 = 123,
526 V2CMPLTU_RRR_0_OPCODE_X1 = 79,
527 V2CMPNE_RRR_0_OPCODE_X0 = 124,
528 V2CMPNE_RRR_0_OPCODE_X1 = 80,
529 V2DOTPA_RRR_0_OPCODE_X0 = 125,
530 V2DOTP_RRR_0_OPCODE_X0 = 126,
531 V2INT_H_RRR_0_OPCODE_X0 = 127,
532 V2INT_H_RRR_0_OPCODE_X1 = 81,
533 V2INT_L_RRR_0_OPCODE_X0 = 128,
534 V2INT_L_RRR_0_OPCODE_X1 = 82,
535 V2MAXSI_IMM8_OPCODE_X0 = 18,
536 V2MAXSI_IMM8_OPCODE_X1 = 43,
537 V2MAXS_RRR_0_OPCODE_X0 = 129,
538 V2MAXS_RRR_0_OPCODE_X1 = 83,
539 V2MINSI_IMM8_OPCODE_X0 = 19,
540 V2MINSI_IMM8_OPCODE_X1 = 44,
541 V2MINS_RRR_0_OPCODE_X0 = 130,
542 V2MINS_RRR_0_OPCODE_X1 = 84,
543 V2MNZ_RRR_0_OPCODE_X0 = 131,
544 V2MNZ_RRR_0_OPCODE_X1 = 85,
545 V2MULFSC_RRR_0_OPCODE_X0 = 132,
546 V2MULS_RRR_0_OPCODE_X0 = 133,
547 V2MULTS_RRR_0_OPCODE_X0 = 134,
548 V2MZ_RRR_0_OPCODE_X0 = 135,
549 V2MZ_RRR_0_OPCODE_X1 = 86,
550 V2PACKH_RRR_0_OPCODE_X0 = 136,
551 V2PACKH_RRR_0_OPCODE_X1 = 87,
552 V2PACKL_RRR_0_OPCODE_X0 = 137,
553 V2PACKL_RRR_0_OPCODE_X1 = 88,
554 V2PACKUC_RRR_0_OPCODE_X0 = 138,
555 V2PACKUC_RRR_0_OPCODE_X1 = 89,
556 V2SADAS_RRR_0_OPCODE_X0 = 139,
557 V2SADAU_RRR_0_OPCODE_X0 = 140,
558 V2SADS_RRR_0_OPCODE_X0 = 141,
559 V2SADU_RRR_0_OPCODE_X0 = 142,
560 V2SHLI_SHIFT_OPCODE_X0 = 10,
561 V2SHLI_SHIFT_OPCODE_X1 = 10,
562 V2SHLSC_RRR_0_OPCODE_X0 = 143,
563 V2SHLSC_RRR_0_OPCODE_X1 = 90,
564 V2SHL_RRR_0_OPCODE_X0 = 144,
565 V2SHL_RRR_0_OPCODE_X1 = 91,
566 V2SHRSI_SHIFT_OPCODE_X0 = 11,
567 V2SHRSI_SHIFT_OPCODE_X1 = 11,
568 V2SHRS_RRR_0_OPCODE_X0 = 145,
569 V2SHRS_RRR_0_OPCODE_X1 = 92,
570 V2SHRUI_SHIFT_OPCODE_X0 = 12,
571 V2SHRUI_SHIFT_OPCODE_X1 = 12,
572 V2SHRU_RRR_0_OPCODE_X0 = 146,
573 V2SHRU_RRR_0_OPCODE_X1 = 93,
574 V2SUBSC_RRR_0_OPCODE_X0 = 147,
575 V2SUBSC_RRR_0_OPCODE_X1 = 94,
576 V2SUB_RRR_0_OPCODE_X0 = 148,
577 V2SUB_RRR_0_OPCODE_X1 = 95,
578 V4ADDSC_RRR_0_OPCODE_X0 = 149,
579 V4ADDSC_RRR_0_OPCODE_X1 = 96,
580 V4ADD_RRR_0_OPCODE_X0 = 150,
581 V4ADD_RRR_0_OPCODE_X1 = 97,
582 V4INT_H_RRR_0_OPCODE_X0 = 151,
583 V4INT_H_RRR_0_OPCODE_X1 = 98,
584 V4INT_L_RRR_0_OPCODE_X0 = 152,
585 V4INT_L_RRR_0_OPCODE_X1 = 99,
586 V4PACKSC_RRR_0_OPCODE_X0 = 153,
587 V4PACKSC_RRR_0_OPCODE_X1 = 100,
588 V4SHLSC_RRR_0_OPCODE_X0 = 154,
589 V4SHLSC_RRR_0_OPCODE_X1 = 101,
590 V4SHL_RRR_0_OPCODE_X0 = 155,
591 V4SHL_RRR_0_OPCODE_X1 = 102,
592 V4SHRS_RRR_0_OPCODE_X0 = 156,
593 V4SHRS_RRR_0_OPCODE_X1 = 103,
594 V4SHRU_RRR_0_OPCODE_X0 = 157,
595 V4SHRU_RRR_0_OPCODE_X1 = 104,
596 V4SUBSC_RRR_0_OPCODE_X0 = 158,
597 V4SUBSC_RRR_0_OPCODE_X1 = 105,
598 V4SUB_RRR_0_OPCODE_X0 = 159,
599 V4SUB_RRR_0_OPCODE_X1 = 106,
600 WH64_UNARY_OPCODE_X1 = 38,
601 XORI_IMM8_OPCODE_X0 = 20,
602 XORI_IMM8_OPCODE_X1 = 45,
603 XOR_RRR_0_OPCODE_X0 = 160,
604 XOR_RRR_0_OPCODE_X1 = 107,
605 XOR_RRR_5_OPCODE_Y0 = 3,
606 XOR_RRR_5_OPCODE_Y1 = 3
478}; 607};
479 608
480#endif /* !_TILE_OPCODE_CONSTANTS_H */ 609#endif /* !_TILE_OPCODE_CONSTANTS_H */
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index 3eb53525bf9d..db93518fac03 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -16,7 +16,8 @@
16#define _ASM_TILE_PAGE_H 16#define _ASM_TILE_PAGE_H
17 17
18#include <linux/const.h> 18#include <linux/const.h>
19#include <hv/pagesize.h> 19#include <hv/hypervisor.h>
20#include <arch/chip.h>
20 21
21/* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */ 22/* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
22#define PAGE_SHIFT HV_LOG2_PAGE_SIZE_SMALL 23#define PAGE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
@@ -28,8 +29,6 @@
28#define PAGE_MASK (~(PAGE_SIZE - 1)) 29#define PAGE_MASK (~(PAGE_SIZE - 1))
29#define HPAGE_MASK (~(HPAGE_SIZE - 1)) 30#define HPAGE_MASK (~(HPAGE_SIZE - 1))
30 31
31#ifdef __KERNEL__
32
33/* 32/*
34 * If the Kconfig doesn't specify, set a maximum zone order that 33 * If the Kconfig doesn't specify, set a maximum zone order that
35 * is enough so that we can create huge pages from small pages given 34 * is enough so that we can create huge pages from small pages given
@@ -39,9 +38,6 @@
39#define CONFIG_FORCE_MAX_ZONEORDER (HPAGE_SHIFT - PAGE_SHIFT + 1) 38#define CONFIG_FORCE_MAX_ZONEORDER (HPAGE_SHIFT - PAGE_SHIFT + 1)
40#endif 39#endif
41 40
42#include <hv/hypervisor.h>
43#include <arch/chip.h>
44
45#ifndef __ASSEMBLY__ 41#ifndef __ASSEMBLY__
46 42
47#include <linux/types.h> 43#include <linux/types.h>
@@ -91,6 +87,10 @@ typedef struct page *pgtable_t;
91/* Must be a macro since it is used to create constants. */ 87/* Must be a macro since it is used to create constants. */
92#define __pgprot(val) hv_pte(val) 88#define __pgprot(val) hv_pte(val)
93 89
90/* Rarely-used initializers, typically with a "zero" value. */
91#define __pte(x) hv_pte(x)
92#define __pgd(x) hv_pte(x)
93
94static inline u64 pgprot_val(pgprot_t pgprot) 94static inline u64 pgprot_val(pgprot_t pgprot)
95{ 95{
96 return hv_pte_val(pgprot); 96 return hv_pte_val(pgprot);
@@ -110,6 +110,8 @@ static inline u64 pgd_val(pgd_t pgd)
110 110
111typedef HV_PTE pmd_t; 111typedef HV_PTE pmd_t;
112 112
113#define __pmd(x) hv_pte(x)
114
113static inline u64 pmd_val(pmd_t pmd) 115static inline u64 pmd_val(pmd_t pmd)
114{ 116{
115 return hv_pte_val(pmd); 117 return hv_pte_val(pmd);
@@ -318,7 +320,7 @@ static inline int pfn_valid(unsigned long pfn)
318 320
319/* Provide as macros since these require some other headers included. */ 321/* Provide as macros since these require some other headers included. */
320#define page_to_pa(page) ((phys_addr_t)(page_to_pfn(page)) << PAGE_SHIFT) 322#define page_to_pa(page) ((phys_addr_t)(page_to_pfn(page)) << PAGE_SHIFT)
321#define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn(kaddr)) 323#define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn((void *)(kaddr)))
322#define page_to_virt(page) pfn_to_kaddr(page_to_pfn(page)) 324#define page_to_virt(page) pfn_to_kaddr(page_to_pfn(page))
323 325
324struct mm_struct; 326struct mm_struct;
@@ -331,6 +333,4 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
331 333
332#include <asm-generic/memory_model.h> 334#include <asm-generic/memory_model.h>
333 335
334#endif /* __KERNEL__ */
335
336#endif /* _ASM_TILE_PAGE_H */ 336#endif /* _ASM_TILE_PAGE_H */
diff --git a/arch/tile/include/asm/parport.h b/arch/tile/include/asm/parport.h
new file mode 100644
index 000000000000..cf252af64590
--- /dev/null
+++ b/arch/tile/include/asm/parport.h
@@ -0,0 +1 @@
#include <asm-generic/parport.h>
diff --git a/arch/tile/include/asm/pci.h b/arch/tile/include/asm/pci.h
index c3fc458a0d32..7f03cefed1b9 100644
--- a/arch/tile/include/asm/pci.h
+++ b/arch/tile/include/asm/pci.h
@@ -46,7 +46,8 @@ struct pci_controller {
46 */ 46 */
47#define PCI_DMA_BUS_IS_PHYS 1 47#define PCI_DMA_BUS_IS_PHYS 1
48 48
49int __init tile_pci_init(void); 49int __devinit tile_pci_init(void);
50int __devinit pcibios_init(void);
50 51
51void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); 52void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
52static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {} 53static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h
new file mode 100644
index 000000000000..fd80328523b4
--- /dev/null
+++ b/arch/tile/include/asm/pgtable_64.h
@@ -0,0 +1,175 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 */
15
16#ifndef _ASM_TILE_PGTABLE_64_H
17#define _ASM_TILE_PGTABLE_64_H
18
19/* The level-0 page table breaks the address space into 32-bit chunks. */
20#define PGDIR_SHIFT HV_LOG2_L1_SPAN
21#define PGDIR_SIZE HV_L1_SPAN
22#define PGDIR_MASK (~(PGDIR_SIZE-1))
23#define PTRS_PER_PGD HV_L0_ENTRIES
24#define SIZEOF_PGD (PTRS_PER_PGD * sizeof(pgd_t))
25
26/*
27 * The level-1 index is defined by the huge page size. A PMD is composed
28 * of PTRS_PER_PMD pgd_t's and is the middle level of the page table.
29 */
30#define PMD_SHIFT HV_LOG2_PAGE_SIZE_LARGE
31#define PMD_SIZE HV_PAGE_SIZE_LARGE
32#define PMD_MASK (~(PMD_SIZE-1))
33#define PTRS_PER_PMD (1 << (PGDIR_SHIFT - PMD_SHIFT))
34#define SIZEOF_PMD (PTRS_PER_PMD * sizeof(pmd_t))
35
36/*
37 * The level-2 index is defined by the difference between the huge
38 * page size and the normal page size. A PTE is composed of
39 * PTRS_PER_PTE pte_t's and is the bottom level of the page table.
40 * Note that the hypervisor docs use PTE for what we call pte_t, so
41 * this nomenclature is somewhat confusing.
42 */
43#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
44#define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t))
45
46/*
47 * Align the vmalloc area to an L2 page table, and leave a guard page
48 * at the beginning and end. The vmalloc code also puts in an internal
49 * guard page between each allocation.
50 */
51#define _VMALLOC_END HUGE_VMAP_BASE
52#define VMALLOC_END (_VMALLOC_END - PAGE_SIZE)
53#define VMALLOC_START (_VMALLOC_START + PAGE_SIZE)
54
55#define HUGE_VMAP_END (HUGE_VMAP_BASE + PGDIR_SIZE)
56
57#ifndef __ASSEMBLY__
58
59/* We have no pud since we are a three-level page table. */
60#include <asm-generic/pgtable-nopud.h>
61
62static inline int pud_none(pud_t pud)
63{
64 return pud_val(pud) == 0;
65}
66
67static inline int pud_present(pud_t pud)
68{
69 return pud_val(pud) & _PAGE_PRESENT;
70}
71
72#define pmd_ERROR(e) \
73 pr_err("%s:%d: bad pmd 0x%016llx.\n", __FILE__, __LINE__, pmd_val(e))
74
75static inline void pud_clear(pud_t *pudp)
76{
77 __pte_clear(&pudp->pgd);
78}
79
80static inline int pud_bad(pud_t pud)
81{
82 return ((pud_val(pud) & _PAGE_ALL) != _PAGE_TABLE);
83}
84
85/* Return the page-table frame number (ptfn) that a pud_t points at. */
86#define pud_ptfn(pud) hv_pte_get_ptfn((pud).pgd)
87
88/*
89 * A given kernel pud_t maps to a kernel pmd_t table at a specific
90 * virtual address. Since kernel pmd_t tables can be aligned at
91 * sub-page granularity, this macro can return non-page-aligned
92 * pointers, despite its name.
93 */
94#define pud_page_vaddr(pud) \
95 (__va((phys_addr_t)pud_ptfn(pud) << HV_LOG2_PAGE_TABLE_ALIGN))
96
97/*
98 * A pud_t points to a pmd_t array. Since we can have multiple per
99 * page, we don't have a one-to-one mapping of pud_t's to pages.
100 */
101#define pud_page(pud) pfn_to_page(HV_PTFN_TO_PFN(pud_ptfn(pud)))
102
103static inline unsigned long pud_index(unsigned long address)
104{
105 return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
106}
107
108#define pmd_offset(pud, address) \
109 ((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address))
110
111static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
112{
113 set_pte(pmdp, pmdval);
114}
115
116/* Create a pmd from a PTFN and pgprot. */
117static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
118{
119 return hv_pte_set_ptfn(prot, ptfn);
120}
121
122/* Return the page-table frame number (ptfn) that a pmd_t points at. */
123static inline unsigned long pmd_ptfn(pmd_t pmd)
124{
125 return hv_pte_get_ptfn(pmd);
126}
127
128static inline void pmd_clear(pmd_t *pmdp)
129{
130 __pte_clear(pmdp);
131}
132
133/* Normalize an address to having the correct high bits set. */
134#define pgd_addr_normalize pgd_addr_normalize
135static inline unsigned long pgd_addr_normalize(unsigned long addr)
136{
137 return ((long)addr << (CHIP_WORD_SIZE() - CHIP_VA_WIDTH())) >>
138 (CHIP_WORD_SIZE() - CHIP_VA_WIDTH());
139}
140
141/* We don't define any pgds for these addresses. */
142static inline int pgd_addr_invalid(unsigned long addr)
143{
144 return addr >= MEM_HV_START ||
145 (addr > MEM_LOW_END && addr < MEM_HIGH_START);
146}
147
148/*
149 * Use atomic instructions to provide atomicity against the hypervisor.
150 */
151#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
152static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
153 unsigned long addr, pte_t *ptep)
154{
155 return (__insn_fetchand(&ptep->val, ~HV_PTE_ACCESSED) >>
156 HV_PTE_INDEX_ACCESSED) & 0x1;
157}
158
159#define __HAVE_ARCH_PTEP_SET_WRPROTECT
160static inline void ptep_set_wrprotect(struct mm_struct *mm,
161 unsigned long addr, pte_t *ptep)
162{
163 __insn_fetchand(&ptep->val, ~HV_PTE_WRITABLE);
164}
165
166#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
167static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
168 unsigned long addr, pte_t *ptep)
169{
170 return hv_pte(__insn_exch(&ptep->val, 0UL));
171}
172
173#endif /* __ASSEMBLY__ */
174
175#endif /* _ASM_TILE_PGTABLE_64_H */
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index e6889474038a..34c1e01ffb5e 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -215,6 +215,8 @@ static inline void release_thread(struct task_struct *dead_task)
215 215
216extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); 216extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
217 217
218extern int do_work_pending(struct pt_regs *regs, u32 flags);
219
218 220
219/* 221/*
220 * Return saved (kernel) PC of a blocked thread. 222 * Return saved (kernel) PC of a blocked thread.
@@ -255,10 +257,6 @@ static inline void cpu_relax(void)
255 barrier(); 257 barrier();
256} 258}
257 259
258struct siginfo;
259extern void arch_coredump_signal(struct siginfo *, struct pt_regs *);
260#define arch_coredump_signal arch_coredump_signal
261
262/* Info on this processor (see fs/proc/cpuinfo.c) */ 260/* Info on this processor (see fs/proc/cpuinfo.c) */
263struct seq_operations; 261struct seq_operations;
264extern const struct seq_operations cpuinfo_op; 262extern const struct seq_operations cpuinfo_op;
@@ -269,9 +267,6 @@ extern char chip_model[64];
269/* Data on which physical memory controller corresponds to which NUMA node. */ 267/* Data on which physical memory controller corresponds to which NUMA node. */
270extern int node_controller[]; 268extern int node_controller[];
271 269
272/* Do we dump information to the console when a user application crashes? */
273extern int show_crashinfo;
274
275#if CHIP_HAS_CBOX_HOME_MAP() 270#if CHIP_HAS_CBOX_HOME_MAP()
276/* Does the heap allocator return hash-for-home pages by default? */ 271/* Does the heap allocator return hash-for-home pages by default? */
277extern int hash_default; 272extern int hash_default;
diff --git a/arch/tile/include/asm/serial.h b/arch/tile/include/asm/serial.h
new file mode 100644
index 000000000000..a0cb0caff152
--- /dev/null
+++ b/arch/tile/include/asm/serial.h
@@ -0,0 +1 @@
#include <asm-generic/serial.h>
diff --git a/arch/tile/include/asm/signal.h b/arch/tile/include/asm/signal.h
index 81d92a45cd4b..1e1e616783eb 100644
--- a/arch/tile/include/asm/signal.h
+++ b/arch/tile/include/asm/signal.h
@@ -28,6 +28,10 @@ struct pt_regs;
28int restore_sigcontext(struct pt_regs *, struct sigcontext __user *); 28int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
29int setup_sigcontext(struct sigcontext __user *, struct pt_regs *); 29int setup_sigcontext(struct sigcontext __user *, struct pt_regs *);
30void do_signal(struct pt_regs *regs); 30void do_signal(struct pt_regs *regs);
31void signal_fault(const char *type, struct pt_regs *,
32 void __user *frame, int sig);
33void trace_unhandled_signal(const char *type, struct pt_regs *regs,
34 unsigned long address, int signo);
31#endif 35#endif
32 36
33#endif /* _ASM_TILE_SIGNAL_H */ 37#endif /* _ASM_TILE_SIGNAL_H */
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
new file mode 100644
index 000000000000..72be5904e020
--- /dev/null
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -0,0 +1,161 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * 64-bit SMP ticket spinlocks, allowing only a single CPU anywhere
15 * (the type definitions are in asm/spinlock_types.h)
16 */
17
18#ifndef _ASM_TILE_SPINLOCK_64_H
19#define _ASM_TILE_SPINLOCK_64_H
20
21/* Shifts and masks for the various fields in "lock". */
22#define __ARCH_SPIN_CURRENT_SHIFT 17
23#define __ARCH_SPIN_NEXT_MASK 0x7fff
24#define __ARCH_SPIN_NEXT_OVERFLOW 0x8000
25
26/*
27 * Return the "current" portion of a ticket lock value,
28 * i.e. the number that currently owns the lock.
29 */
30static inline int arch_spin_current(u32 val)
31{
32 return val >> __ARCH_SPIN_CURRENT_SHIFT;
33}
34
35/*
36 * Return the "next" portion of a ticket lock value,
37 * i.e. the number that the next task to try to acquire the lock will get.
38 */
39static inline int arch_spin_next(u32 val)
40{
41 return val & __ARCH_SPIN_NEXT_MASK;
42}
43
44/* The lock is locked if a task would have to wait to get it. */
45static inline int arch_spin_is_locked(arch_spinlock_t *lock)
46{
47 u32 val = lock->lock;
48 return arch_spin_current(val) != arch_spin_next(val);
49}
50
51/* Bump the current ticket so the next task owns the lock. */
52static inline void arch_spin_unlock(arch_spinlock_t *lock)
53{
54 wmb(); /* guarantee anything modified under the lock is visible */
55 __insn_fetchadd4(&lock->lock, 1U << __ARCH_SPIN_CURRENT_SHIFT);
56}
57
58void arch_spin_unlock_wait(arch_spinlock_t *lock);
59
60void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val);
61
62/* Grab the "next" ticket number and bump it atomically.
63 * If the current ticket is not ours, go to the slow path.
64 * We also take the slow path if the "next" value overflows.
65 */
66static inline void arch_spin_lock(arch_spinlock_t *lock)
67{
68 u32 val = __insn_fetchadd4(&lock->lock, 1);
69 u32 ticket = val & (__ARCH_SPIN_NEXT_MASK | __ARCH_SPIN_NEXT_OVERFLOW);
70 if (unlikely(arch_spin_current(val) != ticket))
71 arch_spin_lock_slow(lock, ticket);
72}
73
74/* Try to get the lock, and return whether we succeeded. */
75int arch_spin_trylock(arch_spinlock_t *lock);
76
77/* We cannot take an interrupt after getting a ticket, so don't enable them. */
78#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
79
80/*
81 * Read-write spinlocks, allowing multiple readers
82 * but only one writer.
83 *
84 * We use fetchadd() for readers, and fetchor() with the sign bit
85 * for writers.
86 */
87
88#define __WRITE_LOCK_BIT (1 << 31)
89
90static inline int arch_write_val_locked(int val)
91{
92 return val < 0; /* Optimize "val & __WRITE_LOCK_BIT". */
93}
94
95/**
96 * read_can_lock - would read_trylock() succeed?
97 * @lock: the rwlock in question.
98 */
99static inline int arch_read_can_lock(arch_rwlock_t *rw)
100{
101 return !arch_write_val_locked(rw->lock);
102}
103
104/**
105 * write_can_lock - would write_trylock() succeed?
106 * @lock: the rwlock in question.
107 */
108static inline int arch_write_can_lock(arch_rwlock_t *rw)
109{
110 return rw->lock == 0;
111}
112
113extern void __read_lock_failed(arch_rwlock_t *rw);
114
115static inline void arch_read_lock(arch_rwlock_t *rw)
116{
117 u32 val = __insn_fetchaddgez4(&rw->lock, 1);
118 if (unlikely(arch_write_val_locked(val)))
119 __read_lock_failed(rw);
120}
121
122extern void __write_lock_failed(arch_rwlock_t *rw, u32 val);
123
124static inline void arch_write_lock(arch_rwlock_t *rw)
125{
126 u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
127 if (unlikely(val != 0))
128 __write_lock_failed(rw, val);
129}
130
131static inline void arch_read_unlock(arch_rwlock_t *rw)
132{
133 __insn_mf();
134 __insn_fetchadd4(&rw->lock, -1);
135}
136
137static inline void arch_write_unlock(arch_rwlock_t *rw)
138{
139 __insn_mf();
140 rw->lock = 0;
141}
142
143static inline int arch_read_trylock(arch_rwlock_t *rw)
144{
145 return !arch_write_val_locked(__insn_fetchaddgez4(&rw->lock, 1));
146}
147
148static inline int arch_write_trylock(arch_rwlock_t *rw)
149{
150 u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
151 if (likely(val == 0))
152 return 1;
153 if (!arch_write_val_locked(val))
154 __insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT);
155 return 0;
156}
157
158#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
159#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
160
161#endif /* _ASM_TILE_SPINLOCK_64_H */
diff --git a/arch/tile/include/asm/stat.h b/arch/tile/include/asm/stat.h
index b16e5db8f0e7..c0db34d56be3 100644
--- a/arch/tile/include/asm/stat.h
+++ b/arch/tile/include/asm/stat.h
@@ -1,4 +1,4 @@
1#ifdef CONFIG_COMPAT 1#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
2#define __ARCH_WANT_STAT64 /* Used for compat_sys_stat64() etc. */ 2#define __ARCH_WANT_STAT64 /* Used for compat_sys_stat64() etc. */
3#endif 3#endif
4#include <asm-generic/stat.h> 4#include <asm-generic/stat.h>
diff --git a/arch/tile/include/asm/swab.h b/arch/tile/include/asm/swab.h
index 25c686a00f1d..7c37b38f6c8d 100644
--- a/arch/tile/include/asm/swab.h
+++ b/arch/tile/include/asm/swab.h
@@ -18,12 +18,6 @@
18/* Tile gcc is always >= 4.3.0, so we use __builtin_bswap. */ 18/* Tile gcc is always >= 4.3.0, so we use __builtin_bswap. */
19#define __arch_swab32(x) __builtin_bswap32(x) 19#define __arch_swab32(x) __builtin_bswap32(x)
20#define __arch_swab64(x) __builtin_bswap64(x) 20#define __arch_swab64(x) __builtin_bswap64(x)
21
22/* Use the variant that is natural for the wordsize. */
23#ifdef CONFIG_64BIT
24#define __arch_swab16(x) (__builtin_bswap64(x) >> 48)
25#else
26#define __arch_swab16(x) (__builtin_bswap32(x) >> 16) 21#define __arch_swab16(x) (__builtin_bswap32(x) >> 16)
27#endif
28 22
29#endif /* _ASM_TILE_SWAB_H */ 23#endif /* _ASM_TILE_SWAB_H */
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index 3405b52853b8..bc4f562bd459 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -125,6 +125,7 @@ extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
125#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ 125#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
126#define TIF_SECCOMP 6 /* secure computing */ 126#define TIF_SECCOMP 6 /* secure computing */
127#define TIF_MEMDIE 7 /* OOM killer at work */ 127#define TIF_MEMDIE 7 /* OOM killer at work */
128#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
128 129
129#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) 130#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
130#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 131#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
@@ -134,10 +135,12 @@ extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
134#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 135#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
135#define _TIF_SECCOMP (1<<TIF_SECCOMP) 136#define _TIF_SECCOMP (1<<TIF_SECCOMP)
136#define _TIF_MEMDIE (1<<TIF_MEMDIE) 137#define _TIF_MEMDIE (1<<TIF_MEMDIE)
138#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
137 139
138/* Work to do on any return to user space. */ 140/* Work to do on any return to user space. */
139#define _TIF_ALLWORK_MASK \ 141#define _TIF_ALLWORK_MASK \
140 (_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|_TIF_ASYNC_TLB) 142 (_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|\
143 _TIF_ASYNC_TLB|_TIF_NOTIFY_RESUME)
141 144
142/* 145/*
143 * Thread-synchronous status. 146 * Thread-synchronous status.
diff --git a/arch/tile/include/asm/topology.h b/arch/tile/include/asm/topology.h
index 343172d422a9..6fdd0c860193 100644
--- a/arch/tile/include/asm/topology.h
+++ b/arch/tile/include/asm/topology.h
@@ -44,25 +44,64 @@ static inline const struct cpumask *cpumask_of_node(int node)
44/* For now, use numa node -1 for global allocation. */ 44/* For now, use numa node -1 for global allocation. */
45#define pcibus_to_node(bus) ((void)(bus), -1) 45#define pcibus_to_node(bus) ((void)(bus), -1)
46 46
47/*
48 * TILE architecture has many cores integrated in one processor, so we need
49 * setup bigger balance_interval for both CPU/NODE scheduling domains to
50 * reduce process scheduling costs.
51 */
52
53/* sched_domains SD_CPU_INIT for TILE architecture */
54#define SD_CPU_INIT (struct sched_domain) { \
55 .min_interval = 4, \
56 .max_interval = 128, \
57 .busy_factor = 64, \
58 .imbalance_pct = 125, \
59 .cache_nice_tries = 1, \
60 .busy_idx = 2, \
61 .idle_idx = 1, \
62 .newidle_idx = 0, \
63 .wake_idx = 0, \
64 .forkexec_idx = 0, \
65 \
66 .flags = 1*SD_LOAD_BALANCE \
67 | 1*SD_BALANCE_NEWIDLE \
68 | 1*SD_BALANCE_EXEC \
69 | 1*SD_BALANCE_FORK \
70 | 0*SD_BALANCE_WAKE \
71 | 0*SD_WAKE_AFFINE \
72 | 0*SD_PREFER_LOCAL \
73 | 0*SD_SHARE_CPUPOWER \
74 | 0*SD_SHARE_PKG_RESOURCES \
75 | 0*SD_SERIALIZE \
76 , \
77 .last_balance = jiffies, \
78 .balance_interval = 32, \
79}
80
47/* sched_domains SD_NODE_INIT for TILE architecture */ 81/* sched_domains SD_NODE_INIT for TILE architecture */
48#define SD_NODE_INIT (struct sched_domain) { \ 82#define SD_NODE_INIT (struct sched_domain) { \
49 .min_interval = 8, \ 83 .min_interval = 16, \
50 .max_interval = 32, \ 84 .max_interval = 512, \
51 .busy_factor = 32, \ 85 .busy_factor = 32, \
52 .imbalance_pct = 125, \ 86 .imbalance_pct = 125, \
53 .cache_nice_tries = 1, \ 87 .cache_nice_tries = 1, \
54 .busy_idx = 3, \ 88 .busy_idx = 3, \
55 .idle_idx = 1, \ 89 .idle_idx = 1, \
56 .newidle_idx = 2, \ 90 .newidle_idx = 2, \
57 .wake_idx = 1, \ 91 .wake_idx = 1, \
58 .flags = SD_LOAD_BALANCE \ 92 .flags = 1*SD_LOAD_BALANCE \
59 | SD_BALANCE_NEWIDLE \ 93 | 1*SD_BALANCE_NEWIDLE \
60 | SD_BALANCE_EXEC \ 94 | 1*SD_BALANCE_EXEC \
61 | SD_BALANCE_FORK \ 95 | 1*SD_BALANCE_FORK \
62 | SD_WAKE_AFFINE \ 96 | 0*SD_BALANCE_WAKE \
63 | SD_SERIALIZE, \ 97 | 0*SD_WAKE_AFFINE \
64 .last_balance = jiffies, \ 98 | 0*SD_PREFER_LOCAL \
65 .balance_interval = 1, \ 99 | 0*SD_SHARE_CPUPOWER \
100 | 0*SD_SHARE_PKG_RESOURCES \
101 | 1*SD_SERIALIZE \
102 , \
103 .last_balance = jiffies, \
104 .balance_interval = 128, \
66} 105}
67 106
68/* By definition, we create nodes based on online memory. */ 107/* By definition, we create nodes based on online memory. */
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h
index d06e35f57201..5f20f920f932 100644
--- a/arch/tile/include/asm/traps.h
+++ b/arch/tile/include/asm/traps.h
@@ -15,10 +15,14 @@
15#ifndef _ASM_TILE_TRAPS_H 15#ifndef _ASM_TILE_TRAPS_H
16#define _ASM_TILE_TRAPS_H 16#define _ASM_TILE_TRAPS_H
17 17
18#include <arch/chip.h>
19
18/* mm/fault.c */ 20/* mm/fault.c */
19void do_page_fault(struct pt_regs *, int fault_num, 21void do_page_fault(struct pt_regs *, int fault_num,
20 unsigned long address, unsigned long write); 22 unsigned long address, unsigned long write);
23#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
21void do_async_page_fault(struct pt_regs *); 24void do_async_page_fault(struct pt_regs *);
25#endif
22 26
23#ifndef __tilegx__ 27#ifndef __tilegx__
24/* 28/*
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h
index b35c2db71199..f70bf1c541f1 100644
--- a/arch/tile/include/asm/unistd.h
+++ b/arch/tile/include/asm/unistd.h
@@ -15,7 +15,7 @@
15#if !defined(_ASM_TILE_UNISTD_H) || defined(__SYSCALL) 15#if !defined(_ASM_TILE_UNISTD_H) || defined(__SYSCALL)
16#define _ASM_TILE_UNISTD_H 16#define _ASM_TILE_UNISTD_H
17 17
18#ifndef __LP64__ 18#if !defined(__LP64__) || defined(__SYSCALL_COMPAT)
19/* Use the flavor of this syscall that matches the 32-bit API better. */ 19/* Use the flavor of this syscall that matches the 32-bit API better. */
20#define __ARCH_WANT_SYNC_FILE_RANGE2 20#define __ARCH_WANT_SYNC_FILE_RANGE2
21#endif 21#endif
diff --git a/arch/tile/include/hv/pagesize.h b/arch/tile/include/asm/vga.h
index 58bed114fedd..7b46e754d611 100644
--- a/arch/tile/include/hv/pagesize.h
+++ b/arch/tile/include/asm/vga.h
@@ -10,23 +10,30 @@
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for 11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details. 12 * more details.
13 *
14 * Access to VGA videoram.
13 */ 15 */
14 16
15/** 17#ifndef _ASM_TILE_VGA_H
16 * @file pagesize.h 18#define _ASM_TILE_VGA_H
17 */
18 19
19#ifndef _HV_PAGESIZE_H 20#include <asm/io.h>
20#define _HV_PAGESIZE_H
21 21
22/** The log2 of the size of small pages, in bytes. This value should 22#define VT_BUF_HAVE_RW
23 * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
24 */
25#define HV_LOG2_PAGE_SIZE_SMALL 16
26 23
27/** The log2 of the size of large pages, in bytes. This value should be 24static inline void scr_writew(u16 val, volatile u16 *addr)
28 * verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE). 25{
29 */ 26 __raw_writew(val, (volatile u16 __iomem *) addr);
30#define HV_LOG2_PAGE_SIZE_LARGE 24 27}
28
29static inline u16 scr_readw(volatile const u16 *addr)
30{
31 return __raw_readw((volatile const u16 __iomem *) addr);
32}
33
34#define vga_readb(a) readb((u8 __iomem *)(a))
35#define vga_writeb(v,a) writeb(v, (u8 __iomem *)(a))
36
37#define VGA_MAP_MEM(x,s) ((unsigned long) ioremap(x, s))
31 38
32#endif /* _HV_PAGESIZE_H */ 39#endif
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index ee41bca4c8c4..72ec1e972f15 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -22,8 +22,6 @@
22 22
23#include <arch/chip.h> 23#include <arch/chip.h>
24 24
25#include <hv/pagesize.h>
26
27/* Linux builds want unsigned long constants, but assembler wants numbers */ 25/* Linux builds want unsigned long constants, but assembler wants numbers */
28#ifdef __ASSEMBLER__ 26#ifdef __ASSEMBLER__
29/** One, for assembler */ 27/** One, for assembler */
@@ -44,11 +42,21 @@
44 */ 42 */
45#define HV_L1_SPAN (__HV_SIZE_ONE << HV_LOG2_L1_SPAN) 43#define HV_L1_SPAN (__HV_SIZE_ONE << HV_LOG2_L1_SPAN)
46 44
45/** The log2 of the size of small pages, in bytes. This value should
46 * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
47 */
48#define HV_LOG2_PAGE_SIZE_SMALL 16
49
47/** The size of small pages, in bytes. This value should be verified 50/** The size of small pages, in bytes. This value should be verified
48 * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL). 51 * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
49 */ 52 */
50#define HV_PAGE_SIZE_SMALL (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_SMALL) 53#define HV_PAGE_SIZE_SMALL (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_SMALL)
51 54
55/** The log2 of the size of large pages, in bytes. This value should be
56 * verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
57 */
58#define HV_LOG2_PAGE_SIZE_LARGE 24
59
52/** The size of large pages, in bytes. This value should be verified 60/** The size of large pages, in bytes. This value should be verified
53 * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE). 61 * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
54 */ 62 */
diff --git a/arch/tile/kernel/backtrace.c b/arch/tile/kernel/backtrace.c
index 55a6a74974b4..1dc71eabfc5a 100644
--- a/arch/tile/kernel/backtrace.c
+++ b/arch/tile/kernel/backtrace.c
@@ -14,19 +14,11 @@
14 14
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/string.h> 16#include <linux/string.h>
17
18#include <asm/backtrace.h> 17#include <asm/backtrace.h>
19
20#include <arch/chip.h>
21
22#include <asm/opcode-tile.h> 18#include <asm/opcode-tile.h>
19#include <arch/abi.h>
23 20
24 21#ifdef __tilegx__
25#define TREG_SP 54
26#define TREG_LR 55
27
28
29#if TILE_CHIP >= 10
30#define tile_bundle_bits tilegx_bundle_bits 22#define tile_bundle_bits tilegx_bundle_bits
31#define TILE_MAX_INSTRUCTIONS_PER_BUNDLE TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE 23#define TILE_MAX_INSTRUCTIONS_PER_BUNDLE TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE
32#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES 24#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES
@@ -47,7 +39,7 @@ typedef long long bt_int_reg_t;
47typedef int bt_int_reg_t; 39typedef int bt_int_reg_t;
48#endif 40#endif
49 41
50/** A decoded bundle used for backtracer analysis. */ 42/* A decoded bundle used for backtracer analysis. */
51struct BacktraceBundle { 43struct BacktraceBundle {
52 tile_bundle_bits bits; 44 tile_bundle_bits bits;
53 int num_insns; 45 int num_insns;
@@ -56,23 +48,7 @@ struct BacktraceBundle {
56}; 48};
57 49
58 50
59/* This implementation only makes sense for native tools. */ 51/* Locates an instruction inside the given bundle that
60/** Default function to read memory. */
61static bool bt_read_memory(void *result, VirtualAddress addr,
62 unsigned int size, void *extra)
63{
64 /* FIXME: this should do some horrible signal stuff to catch
65 * SEGV cleanly and fail.
66 *
67 * Or else the caller should do the setjmp for efficiency.
68 */
69
70 memcpy(result, (const void *)addr, size);
71 return true;
72}
73
74
75/** Locates an instruction inside the given bundle that
76 * has the specified mnemonic, and whose first 'num_operands_to_match' 52 * has the specified mnemonic, and whose first 'num_operands_to_match'
77 * operands exactly match those in 'operand_values'. 53 * operands exactly match those in 'operand_values'.
78 */ 54 */
@@ -107,13 +83,13 @@ static const struct tile_decoded_instruction *find_matching_insn(
107 return NULL; 83 return NULL;
108} 84}
109 85
110/** Does this bundle contain an 'iret' instruction? */ 86/* Does this bundle contain an 'iret' instruction? */
111static inline bool bt_has_iret(const struct BacktraceBundle *bundle) 87static inline bool bt_has_iret(const struct BacktraceBundle *bundle)
112{ 88{
113 return find_matching_insn(bundle, TILE_OPC_IRET, NULL, 0) != NULL; 89 return find_matching_insn(bundle, TILE_OPC_IRET, NULL, 0) != NULL;
114} 90}
115 91
116/** Does this bundle contain an 'addi sp, sp, OFFSET' or 92/* Does this bundle contain an 'addi sp, sp, OFFSET' or
117 * 'addli sp, sp, OFFSET' instruction, and if so, what is OFFSET? 93 * 'addli sp, sp, OFFSET' instruction, and if so, what is OFFSET?
118 */ 94 */
119static bool bt_has_addi_sp(const struct BacktraceBundle *bundle, int *adjust) 95static bool bt_has_addi_sp(const struct BacktraceBundle *bundle, int *adjust)
@@ -124,7 +100,7 @@ static bool bt_has_addi_sp(const struct BacktraceBundle *bundle, int *adjust)
124 find_matching_insn(bundle, TILE_OPC_ADDI, vals, 2); 100 find_matching_insn(bundle, TILE_OPC_ADDI, vals, 2);
125 if (insn == NULL) 101 if (insn == NULL)
126 insn = find_matching_insn(bundle, TILE_OPC_ADDLI, vals, 2); 102 insn = find_matching_insn(bundle, TILE_OPC_ADDLI, vals, 2);
127#if TILE_CHIP >= 10 103#ifdef __tilegx__
128 if (insn == NULL) 104 if (insn == NULL)
129 insn = find_matching_insn(bundle, TILEGX_OPC_ADDXLI, vals, 2); 105 insn = find_matching_insn(bundle, TILEGX_OPC_ADDXLI, vals, 2);
130 if (insn == NULL) 106 if (insn == NULL)
@@ -137,7 +113,7 @@ static bool bt_has_addi_sp(const struct BacktraceBundle *bundle, int *adjust)
137 return true; 113 return true;
138} 114}
139 115
140/** Does this bundle contain any 'info OP' or 'infol OP' 116/* Does this bundle contain any 'info OP' or 'infol OP'
141 * instruction, and if so, what are their OP? Note that OP is interpreted 117 * instruction, and if so, what are their OP? Note that OP is interpreted
142 * as an unsigned value by this code since that's what the caller wants. 118 * as an unsigned value by this code since that's what the caller wants.
143 * Returns the number of info ops found. 119 * Returns the number of info ops found.
@@ -161,7 +137,7 @@ static int bt_get_info_ops(const struct BacktraceBundle *bundle,
161 return num_ops; 137 return num_ops;
162} 138}
163 139
164/** Does this bundle contain a jrp instruction, and if so, to which 140/* Does this bundle contain a jrp instruction, and if so, to which
165 * register is it jumping? 141 * register is it jumping?
166 */ 142 */
167static bool bt_has_jrp(const struct BacktraceBundle *bundle, int *target_reg) 143static bool bt_has_jrp(const struct BacktraceBundle *bundle, int *target_reg)
@@ -175,7 +151,7 @@ static bool bt_has_jrp(const struct BacktraceBundle *bundle, int *target_reg)
175 return true; 151 return true;
176} 152}
177 153
178/** Does this bundle modify the specified register in any way? */ 154/* Does this bundle modify the specified register in any way? */
179static bool bt_modifies_reg(const struct BacktraceBundle *bundle, int reg) 155static bool bt_modifies_reg(const struct BacktraceBundle *bundle, int reg)
180{ 156{
181 int i, j; 157 int i, j;
@@ -195,34 +171,34 @@ static bool bt_modifies_reg(const struct BacktraceBundle *bundle, int reg)
195 return false; 171 return false;
196} 172}
197 173
198/** Does this bundle modify sp? */ 174/* Does this bundle modify sp? */
199static inline bool bt_modifies_sp(const struct BacktraceBundle *bundle) 175static inline bool bt_modifies_sp(const struct BacktraceBundle *bundle)
200{ 176{
201 return bt_modifies_reg(bundle, TREG_SP); 177 return bt_modifies_reg(bundle, TREG_SP);
202} 178}
203 179
204/** Does this bundle modify lr? */ 180/* Does this bundle modify lr? */
205static inline bool bt_modifies_lr(const struct BacktraceBundle *bundle) 181static inline bool bt_modifies_lr(const struct BacktraceBundle *bundle)
206{ 182{
207 return bt_modifies_reg(bundle, TREG_LR); 183 return bt_modifies_reg(bundle, TREG_LR);
208} 184}
209 185
210/** Does this bundle contain the instruction 'move fp, sp'? */ 186/* Does this bundle contain the instruction 'move fp, sp'? */
211static inline bool bt_has_move_r52_sp(const struct BacktraceBundle *bundle) 187static inline bool bt_has_move_r52_sp(const struct BacktraceBundle *bundle)
212{ 188{
213 static const int vals[2] = { 52, TREG_SP }; 189 static const int vals[2] = { 52, TREG_SP };
214 return find_matching_insn(bundle, TILE_OPC_MOVE, vals, 2) != NULL; 190 return find_matching_insn(bundle, TILE_OPC_MOVE, vals, 2) != NULL;
215} 191}
216 192
217/** Does this bundle contain a store of lr to sp? */ 193/* Does this bundle contain a store of lr to sp? */
218static inline bool bt_has_sw_sp_lr(const struct BacktraceBundle *bundle) 194static inline bool bt_has_sw_sp_lr(const struct BacktraceBundle *bundle)
219{ 195{
220 static const int vals[2] = { TREG_SP, TREG_LR }; 196 static const int vals[2] = { TREG_SP, TREG_LR };
221 return find_matching_insn(bundle, OPCODE_STORE, vals, 2) != NULL; 197 return find_matching_insn(bundle, OPCODE_STORE, vals, 2) != NULL;
222} 198}
223 199
224#if TILE_CHIP >= 10 200#ifdef __tilegx__
225/** Track moveli values placed into registers. */ 201/* Track moveli values placed into registers. */
226static inline void bt_update_moveli(const struct BacktraceBundle *bundle, 202static inline void bt_update_moveli(const struct BacktraceBundle *bundle,
227 int moveli_args[]) 203 int moveli_args[])
228{ 204{
@@ -238,7 +214,7 @@ static inline void bt_update_moveli(const struct BacktraceBundle *bundle,
238 } 214 }
239} 215}
240 216
241/** Does this bundle contain an 'add sp, sp, reg' instruction 217/* Does this bundle contain an 'add sp, sp, reg' instruction
242 * from a register that we saw a moveli into, and if so, what 218 * from a register that we saw a moveli into, and if so, what
243 * is the value in the register? 219 * is the value in the register?
244 */ 220 */
@@ -260,11 +236,11 @@ static bool bt_has_add_sp(const struct BacktraceBundle *bundle, int *adjust,
260} 236}
261#endif 237#endif
262 238
263/** Locates the caller's PC and SP for a program starting at the 239/* Locates the caller's PC and SP for a program starting at the
264 * given address. 240 * given address.
265 */ 241 */
266static void find_caller_pc_and_caller_sp(CallerLocation *location, 242static void find_caller_pc_and_caller_sp(CallerLocation *location,
267 const VirtualAddress start_pc, 243 const unsigned long start_pc,
268 BacktraceMemoryReader read_memory_func, 244 BacktraceMemoryReader read_memory_func,
269 void *read_memory_func_extra) 245 void *read_memory_func_extra)
270{ 246{
@@ -288,9 +264,9 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
288 tile_bundle_bits prefetched_bundles[32]; 264 tile_bundle_bits prefetched_bundles[32];
289 int num_bundles_prefetched = 0; 265 int num_bundles_prefetched = 0;
290 int next_bundle = 0; 266 int next_bundle = 0;
291 VirtualAddress pc; 267 unsigned long pc;
292 268
293#if TILE_CHIP >= 10 269#ifdef __tilegx__
294 /* Naively try to track moveli values to support addx for -m32. */ 270 /* Naively try to track moveli values to support addx for -m32. */
295 int moveli_args[TILEGX_NUM_REGISTERS] = { 0 }; 271 int moveli_args[TILEGX_NUM_REGISTERS] = { 0 };
296#endif 272#endif
@@ -369,10 +345,6 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
369 /* Weird; reserved value, ignore it. */ 345 /* Weird; reserved value, ignore it. */
370 continue; 346 continue;
371 } 347 }
372 if (info_operand & ENTRY_POINT_INFO_OP) {
373 /* This info op is ignored by the backtracer. */
374 continue;
375 }
376 348
377 /* Skip info ops which are not in the 349 /* Skip info ops which are not in the
378 * "one_ago" mode we want right now. 350 * "one_ago" mode we want right now.
@@ -453,7 +425,7 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
453 if (!sp_determined) { 425 if (!sp_determined) {
454 int adjust; 426 int adjust;
455 if (bt_has_addi_sp(&bundle, &adjust) 427 if (bt_has_addi_sp(&bundle, &adjust)
456#if TILE_CHIP >= 10 428#ifdef __tilegx__
457 || bt_has_add_sp(&bundle, &adjust, moveli_args) 429 || bt_has_add_sp(&bundle, &adjust, moveli_args)
458#endif 430#endif
459 ) { 431 ) {
@@ -504,7 +476,7 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
504 } 476 }
505 } 477 }
506 478
507#if TILE_CHIP >= 10 479#ifdef __tilegx__
508 /* Track moveli arguments for -m32 mode. */ 480 /* Track moveli arguments for -m32 mode. */
509 bt_update_moveli(&bundle, moveli_args); 481 bt_update_moveli(&bundle, moveli_args);
510#endif 482#endif
@@ -546,18 +518,26 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
546 } 518 }
547} 519}
548 520
521/* Initializes a backtracer to start from the given location.
522 *
523 * If the frame pointer cannot be determined it is set to -1.
524 *
525 * state: The state to be filled in.
526 * read_memory_func: A callback that reads memory.
527 * read_memory_func_extra: An arbitrary argument to read_memory_func.
528 * pc: The current PC.
529 * lr: The current value of the 'lr' register.
530 * sp: The current value of the 'sp' register.
531 * r52: The current value of the 'r52' register.
532 */
549void backtrace_init(BacktraceIterator *state, 533void backtrace_init(BacktraceIterator *state,
550 BacktraceMemoryReader read_memory_func, 534 BacktraceMemoryReader read_memory_func,
551 void *read_memory_func_extra, 535 void *read_memory_func_extra,
552 VirtualAddress pc, VirtualAddress lr, 536 unsigned long pc, unsigned long lr,
553 VirtualAddress sp, VirtualAddress r52) 537 unsigned long sp, unsigned long r52)
554{ 538{
555 CallerLocation location; 539 CallerLocation location;
556 VirtualAddress fp, initial_frame_caller_pc; 540 unsigned long fp, initial_frame_caller_pc;
557
558 if (read_memory_func == NULL) {
559 read_memory_func = bt_read_memory;
560 }
561 541
562 /* Find out where we are in the initial frame. */ 542 /* Find out where we are in the initial frame. */
563 find_caller_pc_and_caller_sp(&location, pc, 543 find_caller_pc_and_caller_sp(&location, pc,
@@ -630,12 +610,15 @@ void backtrace_init(BacktraceIterator *state,
630/* Handle the case where the register holds more bits than the VA. */ 610/* Handle the case where the register holds more bits than the VA. */
631static bool valid_addr_reg(bt_int_reg_t reg) 611static bool valid_addr_reg(bt_int_reg_t reg)
632{ 612{
633 return ((VirtualAddress)reg == reg); 613 return ((unsigned long)reg == reg);
634} 614}
635 615
616/* Advances the backtracing state to the calling frame, returning
617 * true iff successful.
618 */
636bool backtrace_next(BacktraceIterator *state) 619bool backtrace_next(BacktraceIterator *state)
637{ 620{
638 VirtualAddress next_fp, next_pc; 621 unsigned long next_fp, next_pc;
639 bt_int_reg_t next_frame[2]; 622 bt_int_reg_t next_frame[2];
640 623
641 if (state->fp == -1) { 624 if (state->fp == -1) {
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index dbc213adf5e1..bf5e9d70266c 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -135,26 +135,15 @@ long tile_compat_sys_msgrcv(int msqid,
135 135
136/* Provide the compat syscall number to call mapping. */ 136/* Provide the compat syscall number to call mapping. */
137#undef __SYSCALL 137#undef __SYSCALL
138#define __SYSCALL(nr, call) [nr] = (compat_##call), 138#define __SYSCALL(nr, call) [nr] = (call),
139 139
140/* The generic versions of these don't work for Tile. */ 140/* The generic versions of these don't work for Tile. */
141#define compat_sys_msgrcv tile_compat_sys_msgrcv 141#define compat_sys_msgrcv tile_compat_sys_msgrcv
142#define compat_sys_msgsnd tile_compat_sys_msgsnd 142#define compat_sys_msgsnd tile_compat_sys_msgsnd
143 143
144/* See comments in sys.c */ 144/* See comments in sys.c */
145#define compat_sys_fadvise64 sys32_fadvise64
146#define compat_sys_fadvise64_64 sys32_fadvise64_64 145#define compat_sys_fadvise64_64 sys32_fadvise64_64
147#define compat_sys_readahead sys32_readahead 146#define compat_sys_readahead sys32_readahead
148#define compat_sys_sync_file_range compat_sys_sync_file_range2
149
150/* We leverage the "struct stat64" type for 32-bit time_t/nsec. */
151#define compat_sys_stat64 sys_stat64
152#define compat_sys_lstat64 sys_lstat64
153#define compat_sys_fstat64 sys_fstat64
154#define compat_sys_fstatat64 sys_fstatat64
155
156/* The native sys_ptrace dynamically handles compat binaries. */
157#define compat_sys_ptrace sys_ptrace
158 147
159/* Call the trampolines to manage pt_regs where necessary. */ 148/* Call the trampolines to manage pt_regs where necessary. */
160#define compat_sys_execve _compat_sys_execve 149#define compat_sys_execve _compat_sys_execve
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index dbb0dfc7bece..a7869ad62776 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -317,7 +317,7 @@ long compat_sys_rt_sigreturn(struct pt_regs *regs)
317 return 0; 317 return 0;
318 318
319badframe: 319badframe:
320 force_sig(SIGSEGV, current); 320 signal_fault("bad sigreturn frame", regs, frame, 0);
321 return 0; 321 return 0;
322} 322}
323 323
@@ -431,6 +431,6 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
431 return 0; 431 return 0;
432 432
433give_sigsegv: 433give_sigsegv:
434 force_sigsegv(sig, current); 434 signal_fault("bad setup frame", regs, frame, sig);
435 return -EFAULT; 435 return -EFAULT;
436} 436}
diff --git a/arch/tile/kernel/futex_64.S b/arch/tile/kernel/futex_64.S
new file mode 100644
index 000000000000..f465d1eda20f
--- /dev/null
+++ b/arch/tile/kernel/futex_64.S
@@ -0,0 +1,55 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * Atomically access user memory, but use MMU to avoid propagating
15 * kernel exceptions.
16 */
17
18#include <linux/linkage.h>
19#include <asm/errno.h>
20#include <asm/futex.h>
21#include <asm/page.h>
22#include <asm/processor.h>
23
24/*
25 * Provide a set of atomic memory operations supporting <asm/futex.h>.
26 *
27 * r0: user address to manipulate
28 * r1: new value to write, or for cmpxchg, old value to compare against
29 * r2: (cmpxchg only) new value to write
30 *
31 * Return __get_user struct, r0 with value, r1 with error.
32 */
33#define FUTEX_OP(name, ...) \
34STD_ENTRY(futex_##name) \
35 __VA_ARGS__; \
36 { \
37 move r1, zero; \
38 jrp lr \
39 }; \
40 STD_ENDPROC(futex_##name); \
41 .pushsection __ex_table,"a"; \
42 .quad 1b, get_user_fault; \
43 .popsection
44
45 .pushsection .fixup,"ax"
46get_user_fault:
47 { movei r1, -EFAULT; jrp lr }
48 ENDPROC(get_user_fault)
49 .popsection
50
51FUTEX_OP(cmpxchg, mtspr CMPEXCH_VALUE, r1; 1: cmpexch4 r0, r0, r2)
52FUTEX_OP(set, 1: exch4 r0, r0, r1)
53FUTEX_OP(add, 1: fetchadd4 r0, r0, r1)
54FUTEX_OP(or, 1: fetchor4 r0, r0, r1)
55FUTEX_OP(andn, nor r1, r1, zero; 1: fetchand4 r0, r0, r1)
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index e910530436e6..3bddef710de4 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -268,12 +268,10 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
268 found_processes = 0; 268 found_processes = 0;
269 list_for_each_entry(p, &rect->task_head, thread.hardwall_list) { 269 list_for_each_entry(p, &rect->task_head, thread.hardwall_list) {
270 BUG_ON(p->thread.hardwall != rect); 270 BUG_ON(p->thread.hardwall != rect);
271 if (p->sighand) { 271 if (!(p->flags & PF_EXITING)) {
272 found_processes = 1; 272 found_processes = 1;
273 pr_notice("hardwall: killing %d\n", p->pid); 273 pr_notice("hardwall: killing %d\n", p->pid);
274 spin_lock(&p->sighand->siglock); 274 do_send_sig_info(info.si_signo, &info, p, false);
275 __group_send_sig_info(info.si_signo, &info, p);
276 spin_unlock(&p->sighand->siglock);
277 } 275 }
278 } 276 }
279 if (!found_processes) 277 if (!found_processes)
diff --git a/arch/tile/kernel/head_64.S b/arch/tile/kernel/head_64.S
new file mode 100644
index 000000000000..6bc3a932fe45
--- /dev/null
+++ b/arch/tile/kernel/head_64.S
@@ -0,0 +1,269 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * TILE startup code.
15 */
16
17#include <linux/linkage.h>
18#include <linux/init.h>
19#include <asm/page.h>
20#include <asm/pgtable.h>
21#include <asm/thread_info.h>
22#include <asm/processor.h>
23#include <asm/asm-offsets.h>
24#include <hv/hypervisor.h>
25#include <arch/chip.h>
26#include <arch/spr_def.h>
27
28/*
29 * This module contains the entry code for kernel images. It performs the
30 * minimal setup needed to call the generic C routines.
31 */
32
33 __HEAD
34ENTRY(_start)
35 /* Notify the hypervisor of what version of the API we want */
36 {
37 movei r1, TILE_CHIP
38 movei r2, TILE_CHIP_REV
39 }
40 {
41 moveli r0, _HV_VERSION
42 jal hv_init
43 }
44 /* Get a reasonable default ASID in r0 */
45 {
46 move r0, zero
47 jal hv_inquire_asid
48 }
49
50 /*
51 * Install the default page table. The relocation required to
52 * statically define the table is a bit too complex, so we have
53 * to plug in the pointer from the L0 to the L1 table by hand.
54 * We only do this on the first cpu to boot, though, since the
55 * other CPUs should see a properly-constructed page table.
56 */
57 {
58 v4int_l r2, zero, r0 /* ASID for hv_install_context */
59 moveli r4, hw1_last(swapper_pgprot - PAGE_OFFSET)
60 }
61 {
62 shl16insli r4, r4, hw0(swapper_pgprot - PAGE_OFFSET)
63 }
64 {
65 ld r1, r4 /* access_pte for hv_install_context */
66 }
67 {
68 moveli r0, hw1_last(.Lsv_data_pmd - PAGE_OFFSET)
69 moveli r6, hw1_last(temp_data_pmd - PAGE_OFFSET)
70 }
71 {
72 /* After initializing swapper_pgprot, HV_PTE_GLOBAL is set. */
73 bfextu r7, r1, HV_PTE_INDEX_GLOBAL, HV_PTE_INDEX_GLOBAL
74 inv r4
75 }
76 bnez r7, .Lno_write
77 {
78 shl16insli r0, r0, hw0(.Lsv_data_pmd - PAGE_OFFSET)
79 shl16insli r6, r6, hw0(temp_data_pmd - PAGE_OFFSET)
80 }
81 {
82 /* Cut off the low bits of the PT address. */
83 shrui r6, r6, HV_LOG2_PAGE_TABLE_ALIGN
84 /* Start with our access pte. */
85 move r5, r1
86 }
87 {
88 /* Stuff the address into the page table pointer slot of the PTE. */
89 bfins r5, r6, HV_PTE_INDEX_PTFN, \
90 HV_PTE_INDEX_PTFN + HV_PTE_PTFN_BITS - 1
91 }
92 {
93 /* Store the L0 data PTE. */
94 st r0, r5
95 addli r6, r6, (temp_code_pmd - temp_data_pmd) >> \
96 HV_LOG2_PAGE_TABLE_ALIGN
97 }
98 {
99 addli r0, r0, .Lsv_code_pmd - .Lsv_data_pmd
100 bfins r5, r6, HV_PTE_INDEX_PTFN, \
101 HV_PTE_INDEX_PTFN + HV_PTE_PTFN_BITS - 1
102 }
103 /* Store the L0 code PTE. */
104 st r0, r5
105
106.Lno_write:
107 moveli lr, hw2_last(1f)
108 {
109 shl16insli lr, lr, hw1(1f)
110 moveli r0, hw1_last(swapper_pg_dir - PAGE_OFFSET)
111 }
112 {
113 shl16insli lr, lr, hw0(1f)
114 shl16insli r0, r0, hw0(swapper_pg_dir - PAGE_OFFSET)
115 }
116 {
117 move r3, zero
118 j hv_install_context
119 }
1201:
121
122 /* Install the interrupt base. */
123 moveli r0, hw2_last(MEM_SV_START)
124 shl16insli r0, r0, hw1(MEM_SV_START)
125 shl16insli r0, r0, hw0(MEM_SV_START)
126 mtspr SPR_INTERRUPT_VECTOR_BASE_K, r0
127
128 /*
129 * Get our processor number and save it away in SAVE_K_0.
130 * Extract stuff from the topology structure: r4 = y, r6 = x,
131 * r5 = width. FIXME: consider whether we want to just make these
132 * 64-bit values (and if so fix smp_topology write below, too).
133 */
134 jal hv_inquire_topology
135 {
136 v4int_l r5, zero, r1 /* r5 = width */
137 shrui r4, r0, 32 /* r4 = y */
138 }
139 {
140 v4int_l r6, zero, r0 /* r6 = x */
141 mul_lu_lu r4, r4, r5
142 }
143 {
144 add r4, r4, r6 /* r4 == cpu == y*width + x */
145 }
146
147#ifdef CONFIG_SMP
148 /*
149 * Load up our per-cpu offset. When the first (master) tile
150 * boots, this value is still zero, so we will load boot_pc
151 * with start_kernel, and boot_sp with init_stack + THREAD_SIZE.
152 * The master tile initializes the per-cpu offset array, so that
153 * when subsequent (secondary) tiles boot, they will instead load
154 * from their per-cpu versions of boot_sp and boot_pc.
155 */
156 moveli r5, hw2_last(__per_cpu_offset)
157 shl16insli r5, r5, hw1(__per_cpu_offset)
158 shl16insli r5, r5, hw0(__per_cpu_offset)
159 shl3add r5, r4, r5
160 ld r5, r5
161 bnez r5, 1f
162
163 /*
164 * Save the width and height to the smp_topology variable
165 * for later use.
166 */
167 moveli r0, hw2_last(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
168 shl16insli r0, r0, hw1(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
169 shl16insli r0, r0, hw0(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
170 st r0, r1
1711:
172#else
173 move r5, zero
174#endif
175
176 /* Load and go with the correct pc and sp. */
177 {
178 moveli r1, hw2_last(boot_sp)
179 moveli r0, hw2_last(boot_pc)
180 }
181 {
182 shl16insli r1, r1, hw1(boot_sp)
183 shl16insli r0, r0, hw1(boot_pc)
184 }
185 {
186 shl16insli r1, r1, hw0(boot_sp)
187 shl16insli r0, r0, hw0(boot_pc)
188 }
189 {
190 add r1, r1, r5
191 add r0, r0, r5
192 }
193 ld r0, r0
194 ld sp, r1
195 or r4, sp, r4
196 mtspr SPR_SYSTEM_SAVE_K_0, r4 /* save ksp0 + cpu */
197 addi sp, sp, -STACK_TOP_DELTA
198 {
199 move lr, zero /* stop backtraces in the called function */
200 jr r0
201 }
202 ENDPROC(_start)
203
204__PAGE_ALIGNED_BSS
205 .align PAGE_SIZE
206ENTRY(empty_zero_page)
207 .fill PAGE_SIZE,1,0
208 END(empty_zero_page)
209
210 .macro PTE cpa, bits1
211 .quad HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED |\
212 HV_PTE_GLOBAL | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) |\
213 (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
214 .endm
215
216__PAGE_ALIGNED_DATA
217 .align PAGE_SIZE
218ENTRY(swapper_pg_dir)
219 .org swapper_pg_dir + HV_L0_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
220.Lsv_data_pmd:
221 .quad 0 /* PTE temp_data_pmd - PAGE_OFFSET, 0 */
222 .org swapper_pg_dir + HV_L0_INDEX(MEM_SV_START) * HV_PTE_SIZE
223.Lsv_code_pmd:
224 .quad 0 /* PTE temp_code_pmd - PAGE_OFFSET, 0 */
225 .org swapper_pg_dir + HV_L0_SIZE
226 END(swapper_pg_dir)
227
228 .align HV_PAGE_TABLE_ALIGN
229ENTRY(temp_data_pmd)
230 /*
231 * We fill the PAGE_OFFSET pmd with huge pages with
232 * VA = PA + PAGE_OFFSET. We remap things with more precise access
233 * permissions later.
234 */
235 .set addr, 0
236 .rept HV_L1_ENTRIES
237 PTE addr, HV_PTE_READABLE | HV_PTE_WRITABLE
238 .set addr, addr + HV_PAGE_SIZE_LARGE
239 .endr
240 .org temp_data_pmd + HV_L1_SIZE
241 END(temp_data_pmd)
242
243 .align HV_PAGE_TABLE_ALIGN
244ENTRY(temp_code_pmd)
245 /*
246 * We fill the MEM_SV_START pmd with huge pages with
247 * VA = PA + PAGE_OFFSET. We remap things with more precise access
248 * permissions later.
249 */
250 .set addr, 0
251 .rept HV_L1_ENTRIES
252 PTE addr, HV_PTE_READABLE | HV_PTE_EXECUTABLE
253 .set addr, addr + HV_PAGE_SIZE_LARGE
254 .endr
255 .org temp_code_pmd + HV_L1_SIZE
256 END(temp_code_pmd)
257
258 /*
259 * Isolate swapper_pgprot to its own cache line, since each cpu
260 * starting up will read it using VA-is-PA and local homing.
261 * This would otherwise likely conflict with other data on the cache
262 * line, once we have set its permanent home in the page tables.
263 */
264 __INITDATA
265 .align CHIP_L2_LINE_SIZE()
266ENTRY(swapper_pgprot)
267 .quad HV_PTE_PRESENT | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
268 .align CHIP_L2_LINE_SIZE()
269 END(swapper_pgprot)
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index fffcfa6b3a62..72ade79b621b 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -851,14 +851,27 @@ STD_ENTRY(interrupt_return)
851 /* Check to see if there is any work to do before returning to user. */ 851 /* Check to see if there is any work to do before returning to user. */
852 { 852 {
853 addi r29, r32, THREAD_INFO_FLAGS_OFFSET 853 addi r29, r32, THREAD_INFO_FLAGS_OFFSET
854 moveli r28, lo16(_TIF_ALLWORK_MASK) 854 moveli r1, lo16(_TIF_ALLWORK_MASK)
855 } 855 }
856 { 856 {
857 lw r29, r29 857 lw r29, r29
858 auli r28, r28, ha16(_TIF_ALLWORK_MASK) 858 auli r1, r1, ha16(_TIF_ALLWORK_MASK)
859 } 859 }
860 and r28, r29, r28 860 and r1, r29, r1
861 bnz r28, .Lwork_pending 861 bzt r1, .Lrestore_all
862
863 /*
864 * Make sure we have all the registers saved for signal
865 * handling or single-step. Call out to C code to figure out
866 * exactly what we need to do for each flag bit, then if
867 * necessary, reload the flags and recheck.
868 */
869 push_extra_callee_saves r0
870 {
871 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
872 jal do_work_pending
873 }
874 bnz r0, .Lresume_userspace
862 875
863 /* 876 /*
864 * In the NMI case we 877 * In the NMI case we
@@ -1099,99 +1112,6 @@ STD_ENTRY(interrupt_return)
1099 pop_reg r50 1112 pop_reg r50
1100 pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51) 1113 pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
1101 j .Lcontinue_restore_regs 1114 j .Lcontinue_restore_regs
1102
1103.Lwork_pending:
1104 /* Mask the reschedule flag */
1105 andi r28, r29, _TIF_NEED_RESCHED
1106
1107 {
1108 /*
1109 * If the NEED_RESCHED flag is called, we call schedule(), which
1110 * may drop this context right here and go do something else.
1111 * On return, jump back to .Lresume_userspace and recheck.
1112 */
1113 bz r28, .Lasync_tlb
1114
1115 /* Mask the async-tlb flag */
1116 andi r28, r29, _TIF_ASYNC_TLB
1117 }
1118
1119 jal schedule
1120 FEEDBACK_REENTER(interrupt_return)
1121
1122 /* Reload the flags and check again */
1123 j .Lresume_userspace
1124
1125.Lasync_tlb:
1126 {
1127 bz r28, .Lneed_sigpending
1128
1129 /* Mask the sigpending flag */
1130 andi r28, r29, _TIF_SIGPENDING
1131 }
1132
1133 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
1134 jal do_async_page_fault
1135 FEEDBACK_REENTER(interrupt_return)
1136
1137 /*
1138 * Go restart the "resume userspace" process. We may have
1139 * fired a signal, and we need to disable interrupts again.
1140 */
1141 j .Lresume_userspace
1142
1143.Lneed_sigpending:
1144 /*
1145 * At this point we are either doing signal handling or single-step,
1146 * so either way make sure we have all the registers saved.
1147 */
1148 push_extra_callee_saves r0
1149
1150 {
1151 /* If no signal pending, skip to singlestep check */
1152 bz r28, .Lneed_singlestep
1153
1154 /* Mask the singlestep flag */
1155 andi r28, r29, _TIF_SINGLESTEP
1156 }
1157
1158 jal do_signal
1159 FEEDBACK_REENTER(interrupt_return)
1160
1161 /* Reload the flags and check again */
1162 j .Lresume_userspace
1163
1164.Lneed_singlestep:
1165 {
1166 /* Get a pointer to the EX1 field */
1167 PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
1168
1169 /* If we get here, our bit must be set. */
1170 bz r28, .Lwork_confusion
1171 }
1172 /* If we are in priv mode, don't single step */
1173 lw r28, r29
1174 andi r28, r28, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
1175 bnz r28, .Lrestore_all
1176
1177 /* Allow interrupts within the single step code */
1178 TRACE_IRQS_ON /* Note: clobbers registers r0-r29 */
1179 IRQ_ENABLE(r20, r21)
1180
1181 /* try to single-step the current instruction */
1182 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
1183 jal single_step_once
1184 FEEDBACK_REENTER(interrupt_return)
1185
1186 /* Re-disable interrupts. TRACE_IRQS_OFF in .Lrestore_all. */
1187 IRQ_DISABLE(r20,r21)
1188
1189 j .Lrestore_all
1190
1191.Lwork_confusion:
1192 move r0, r28
1193 panic "thread_info allwork flags unhandled on userspace resume: %#x"
1194
1195 STD_ENDPROC(interrupt_return) 1115 STD_ENDPROC(interrupt_return)
1196 1116
1197 /* 1117 /*
@@ -1550,7 +1470,10 @@ STD_ENTRY(_sys_clone)
1550 * We place it in the __HEAD section to ensure it is relatively 1470 * We place it in the __HEAD section to ensure it is relatively
1551 * near to the intvec_SWINT_1 code (reachable by a conditional branch). 1471 * near to the intvec_SWINT_1 code (reachable by a conditional branch).
1552 * 1472 *
1553 * Must match register usage in do_page_fault(). 1473 * Our use of ATOMIC_LOCK_REG here must match do_page_fault_ics().
1474 *
1475 * As we do in lib/atomic_asm_32.S, we bypass a store if the value we
1476 * would store is the same as the value we just loaded.
1554 */ 1477 */
1555 __HEAD 1478 __HEAD
1556 .align 64 1479 .align 64
@@ -1611,17 +1534,7 @@ ENTRY(sys_cmpxchg)
1611 { 1534 {
1612 shri r20, r25, 32 - ATOMIC_HASH_L1_SHIFT 1535 shri r20, r25, 32 - ATOMIC_HASH_L1_SHIFT
1613 slt_u r23, r0, r23 1536 slt_u r23, r0, r23
1614 1537 lw r26, r0 /* see comment in the "#else" for the "lw r26". */
1615 /*
1616 * Ensure that the TLB is loaded before we take out the lock.
1617 * On TILEPro, this will start fetching the value all the way
1618 * into our L1 as well (and if it gets modified before we
1619 * grab the lock, it will be invalidated from our cache
1620 * before we reload it). On tile64, we'll start fetching it
1621 * into our L1 if we're the home, and if we're not, we'll
1622 * still at least start fetching it into the home's L2.
1623 */
1624 lw r26, r0
1625 } 1538 }
1626 { 1539 {
1627 s2a r21, r20, r21 1540 s2a r21, r20, r21
@@ -1637,18 +1550,9 @@ ENTRY(sys_cmpxchg)
1637 bbs r23, .Lcmpxchg64 1550 bbs r23, .Lcmpxchg64
1638 andi r23, r0, 7 /* Precompute alignment for cmpxchg64. */ 1551 andi r23, r0, 7 /* Precompute alignment for cmpxchg64. */
1639 } 1552 }
1640
1641 { 1553 {
1642 /*
1643 * We very carefully align the code that actually runs with
1644 * the lock held (nine bundles) so that we know it is all in
1645 * the icache when we start. This instruction (the jump) is
1646 * at the start of the first cache line, address zero mod 64;
1647 * we jump to somewhere in the second cache line to issue the
1648 * tns, then jump back to finish up.
1649 */
1650 s2a ATOMIC_LOCK_REG_NAME, r25, r21 1554 s2a ATOMIC_LOCK_REG_NAME, r25, r21
1651 j .Lcmpxchg32_tns 1555 j .Lcmpxchg32_tns /* see comment in the #else for the jump. */
1652 } 1556 }
1653 1557
1654#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ 1558#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
@@ -1713,24 +1617,25 @@ ENTRY(sys_cmpxchg)
1713 { 1617 {
1714 /* 1618 /*
1715 * We very carefully align the code that actually runs with 1619 * We very carefully align the code that actually runs with
1716 * the lock held (nine bundles) so that we know it is all in 1620 * the lock held (twelve bundles) so that we know it is all in
1717 * the icache when we start. This instruction (the jump) is 1621 * the icache when we start. This instruction (the jump) is
1718 * at the start of the first cache line, address zero mod 64; 1622 * at the start of the first cache line, address zero mod 64;
1719 * we jump to somewhere in the second cache line to issue the 1623 * we jump to the very end of the second cache line to get that
1720 * tns, then jump back to finish up. 1624 * line loaded in the icache, then fall through to issue the tns
1625 * in the third cache line, at which point it's all cached.
1626 * Note that is for performance, not correctness.
1721 */ 1627 */
1722 j .Lcmpxchg32_tns 1628 j .Lcmpxchg32_tns
1723 } 1629 }
1724 1630
1725#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ 1631#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
1726 1632
1727 ENTRY(__sys_cmpxchg_grab_lock) 1633/* Symbol for do_page_fault_ics() to use to compare against the PC. */
1634.global __sys_cmpxchg_grab_lock
1635__sys_cmpxchg_grab_lock:
1728 1636
1729 /* 1637 /*
1730 * Perform the actual cmpxchg or atomic_update. 1638 * Perform the actual cmpxchg or atomic_update.
1731 * Note that the system <arch/atomic.h> header relies on
1732 * atomic_update() to always perform an "mf", so don't make
1733 * it optional or conditional without modifying that code.
1734 */ 1639 */
1735.Ldo_cmpxchg32: 1640.Ldo_cmpxchg32:
1736 { 1641 {
@@ -1748,10 +1653,13 @@ ENTRY(sys_cmpxchg)
1748 } 1653 }
1749 { 1654 {
1750 mvnz r24, r23, r25 /* Use atomic_update value if appropriate. */ 1655 mvnz r24, r23, r25 /* Use atomic_update value if appropriate. */
1751 bbns r22, .Lcmpxchg32_mismatch 1656 bbns r22, .Lcmpxchg32_nostore
1752 } 1657 }
1658 seq r22, r24, r21 /* Are we storing the value we loaded? */
1659 bbs r22, .Lcmpxchg32_nostore
1753 sw r0, r24 1660 sw r0, r24
1754 1661
1662 /* The following instruction is the start of the second cache line. */
1755 /* Do slow mtspr here so the following "mf" waits less. */ 1663 /* Do slow mtspr here so the following "mf" waits less. */
1756 { 1664 {
1757 move sp, r27 1665 move sp, r27
@@ -1759,7 +1667,6 @@ ENTRY(sys_cmpxchg)
1759 } 1667 }
1760 mf 1668 mf
1761 1669
1762 /* The following instruction is the start of the second cache line. */
1763 { 1670 {
1764 move r0, r21 1671 move r0, r21
1765 sw ATOMIC_LOCK_REG_NAME, zero 1672 sw ATOMIC_LOCK_REG_NAME, zero
@@ -1767,7 +1674,7 @@ ENTRY(sys_cmpxchg)
1767 iret 1674 iret
1768 1675
1769 /* Duplicated code here in the case where we don't overlap "mf" */ 1676 /* Duplicated code here in the case where we don't overlap "mf" */
1770.Lcmpxchg32_mismatch: 1677.Lcmpxchg32_nostore:
1771 { 1678 {
1772 move r0, r21 1679 move r0, r21
1773 sw ATOMIC_LOCK_REG_NAME, zero 1680 sw ATOMIC_LOCK_REG_NAME, zero
@@ -1783,8 +1690,6 @@ ENTRY(sys_cmpxchg)
1783 * and for 64-bit cmpxchg. We provide it as a macro and put 1690 * and for 64-bit cmpxchg. We provide it as a macro and put
1784 * it into both versions. We can't share the code literally 1691 * it into both versions. We can't share the code literally
1785 * since it depends on having the right branch-back address. 1692 * since it depends on having the right branch-back address.
1786 * Note that the first few instructions should share the cache
1787 * line with the second half of the actual locked code.
1788 */ 1693 */
1789 .macro cmpxchg_lock, bitwidth 1694 .macro cmpxchg_lock, bitwidth
1790 1695
@@ -1810,7 +1715,7 @@ ENTRY(sys_cmpxchg)
1810 } 1715 }
1811 /* 1716 /*
1812 * The preceding instruction is the last thing that must be 1717 * The preceding instruction is the last thing that must be
1813 * on the second cache line. 1718 * hot in the icache before we do the "tns" above.
1814 */ 1719 */
1815 1720
1816#ifdef CONFIG_SMP 1721#ifdef CONFIG_SMP
@@ -1841,6 +1746,12 @@ ENTRY(sys_cmpxchg)
1841 .endm 1746 .endm
1842 1747
1843.Lcmpxchg32_tns: 1748.Lcmpxchg32_tns:
1749 /*
1750 * This is the last instruction on the second cache line.
1751 * The nop here loads the second line, then we fall through
1752 * to the tns to load the third line before we take the lock.
1753 */
1754 nop
1844 cmpxchg_lock 32 1755 cmpxchg_lock 32
1845 1756
1846 /* 1757 /*
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
new file mode 100644
index 000000000000..79c93e10ba27
--- /dev/null
+++ b/arch/tile/kernel/intvec_64.S
@@ -0,0 +1,1231 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * Linux interrupt vectors.
15 */
16
17#include <linux/linkage.h>
18#include <linux/errno.h>
19#include <linux/unistd.h>
20#include <asm/ptrace.h>
21#include <asm/thread_info.h>
22#include <asm/irqflags.h>
23#include <asm/asm-offsets.h>
24#include <asm/types.h>
25#include <hv/hypervisor.h>
26#include <arch/abi.h>
27#include <arch/interrupts.h>
28#include <arch/spr_def.h>
29
30#ifdef CONFIG_PREEMPT
31# error "No support for kernel preemption currently"
32#endif
33
34#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
35
36#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
37
38
39 .macro push_reg reg, ptr=sp, delta=-8
40 {
41 st \ptr, \reg
42 addli \ptr, \ptr, \delta
43 }
44 .endm
45
46 .macro pop_reg reg, ptr=sp, delta=8
47 {
48 ld \reg, \ptr
49 addli \ptr, \ptr, \delta
50 }
51 .endm
52
53 .macro pop_reg_zero reg, zreg, ptr=sp, delta=8
54 {
55 move \zreg, zero
56 ld \reg, \ptr
57 addi \ptr, \ptr, \delta
58 }
59 .endm
60
61 .macro push_extra_callee_saves reg
62 PTREGS_PTR(\reg, PTREGS_OFFSET_REG(51))
63 push_reg r51, \reg
64 push_reg r50, \reg
65 push_reg r49, \reg
66 push_reg r48, \reg
67 push_reg r47, \reg
68 push_reg r46, \reg
69 push_reg r45, \reg
70 push_reg r44, \reg
71 push_reg r43, \reg
72 push_reg r42, \reg
73 push_reg r41, \reg
74 push_reg r40, \reg
75 push_reg r39, \reg
76 push_reg r38, \reg
77 push_reg r37, \reg
78 push_reg r36, \reg
79 push_reg r35, \reg
80 push_reg r34, \reg, PTREGS_OFFSET_BASE - PTREGS_OFFSET_REG(34)
81 .endm
82
83 .macro panic str
84 .pushsection .rodata, "a"
851:
86 .asciz "\str"
87 .popsection
88 {
89 moveli r0, hw2_last(1b)
90 }
91 {
92 shl16insli r0, r0, hw1(1b)
93 }
94 {
95 shl16insli r0, r0, hw0(1b)
96 jal panic
97 }
98 .endm
99
100
101#ifdef __COLLECT_LINKER_FEEDBACK__
102 .pushsection .text.intvec_feedback,"ax"
103intvec_feedback:
104 .popsection
105#endif
106
107 /*
108 * Default interrupt handler.
109 *
110 * vecnum is where we'll put this code.
111 * c_routine is the C routine we'll call.
112 *
113 * The C routine is passed two arguments:
114 * - A pointer to the pt_regs state.
115 * - The interrupt vector number.
116 *
117 * The "processing" argument specifies the code for processing
118 * the interrupt. Defaults to "handle_interrupt".
119 */
120 .macro int_hand vecnum, vecname, c_routine, processing=handle_interrupt
121 .org (\vecnum << 8)
122intvec_\vecname:
123 /* Temporarily save a register so we have somewhere to work. */
124
125 mtspr SPR_SYSTEM_SAVE_K_1, r0
126 mfspr r0, SPR_EX_CONTEXT_K_1
127
128 andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
129
130 .ifc \vecnum, INT_DOUBLE_FAULT
131 /*
132 * For double-faults from user-space, fall through to the normal
133 * register save and stack setup path. Otherwise, it's the
134 * hypervisor giving us one last chance to dump diagnostics, and we
135 * branch to the kernel_double_fault routine to do so.
136 */
137 beqz r0, 1f
138 j _kernel_double_fault
1391:
140 .else
141 /*
142 * If we're coming from user-space, then set sp to the top of
143 * the kernel stack. Otherwise, assume sp is already valid.
144 */
145 {
146 bnez r0, 0f
147 move r0, sp
148 }
149 .endif
150
151 .ifc \c_routine, do_page_fault
152 /*
153 * The page_fault handler may be downcalled directly by the
154 * hypervisor even when Linux is running and has ICS set.
155 *
156 * In this case the contents of EX_CONTEXT_K_1 reflect the
157 * previous fault and can't be relied on to choose whether or
158 * not to reinitialize the stack pointer. So we add a test
159 * to see whether SYSTEM_SAVE_K_2 has the high bit set,
160 * and if so we don't reinitialize sp, since we must be coming
161 * from Linux. (In fact the precise case is !(val & ~1),
162 * but any Linux PC has to have the high bit set.)
163 *
164 * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
165 * any path that turns into a downcall to one of our TLB handlers.
166 *
167 * FIXME: if we end up never using this path, perhaps we should
168 * prevent the hypervisor from generating downcalls in this case.
169 * The advantage of getting a downcall is we can panic in Linux.
170 */
171 mfspr r0, SPR_SYSTEM_SAVE_K_2
172 {
173 bltz r0, 0f /* high bit in S_S_1_2 is for a PC to use */
174 move r0, sp
175 }
176 .endif
177
178
179 /*
180 * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
181 * the current stack top in the higher bits. So we recover
182 * our stack top by just masking off the low bits, then
183 * point sp at the top aligned address on the actual stack page.
184 */
185 mfspr r0, SPR_SYSTEM_SAVE_K_0
186 mm r0, zero, LOG2_THREAD_SIZE, 63
187
1880:
189 /*
190 * Align the stack mod 64 so we can properly predict what
191 * cache lines we need to write-hint to reduce memory fetch
192 * latency as we enter the kernel. The layout of memory is
193 * as follows, with cache line 0 at the lowest VA, and cache
194 * line 8 just below the r0 value this "andi" computes.
195 * Note that we never write to cache line 8, and we skip
196 * cache lines 1-3 for syscalls.
197 *
198 * cache line 8: ptregs padding (two words)
199 * cache line 7: sp, lr, pc, ex1, faultnum, orig_r0, flags, cmpexch
200 * cache line 6: r46...r53 (tp)
201 * cache line 5: r38...r45
202 * cache line 4: r30...r37
203 * cache line 3: r22...r29
204 * cache line 2: r14...r21
205 * cache line 1: r6...r13
206 * cache line 0: 2 x frame, r0..r5
207 */
208 andi r0, r0, -64
209
210 /*
211 * Push the first four registers on the stack, so that we can set
212 * them to vector-unique values before we jump to the common code.
213 *
214 * Registers are pushed on the stack as a struct pt_regs,
215 * with the sp initially just above the struct, and when we're
216 * done, sp points to the base of the struct, minus
217 * C_ABI_SAVE_AREA_SIZE, so we can directly jal to C code.
218 *
219 * This routine saves just the first four registers, plus the
220 * stack context so we can do proper backtracing right away,
221 * and defers to handle_interrupt to save the rest.
222 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum.
223 */
224 addli r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
225 wh64 r0 /* cache line 7 */
226 {
227 st r0, lr
228 addli r0, r0, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
229 }
230 {
231 st r0, sp
232 addli sp, r0, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_SP
233 }
234 wh64 sp /* cache line 6 */
235 {
236 st sp, r52
237 addli sp, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(52)
238 }
239 wh64 sp /* cache line 0 */
240 {
241 st sp, r1
242 addli sp, sp, PTREGS_OFFSET_REG(2) - PTREGS_OFFSET_REG(1)
243 }
244 {
245 st sp, r2
246 addli sp, sp, PTREGS_OFFSET_REG(3) - PTREGS_OFFSET_REG(2)
247 }
248 {
249 st sp, r3
250 addli sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
251 }
252 mfspr r0, SPR_EX_CONTEXT_K_0
253 .ifc \processing,handle_syscall
254 /*
255 * Bump the saved PC by one bundle so that when we return, we won't
256 * execute the same swint instruction again. We need to do this while
257 * we're in the critical section.
258 */
259 addi r0, r0, 8
260 .endif
261 {
262 st sp, r0
263 addli sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
264 }
265 mfspr r0, SPR_EX_CONTEXT_K_1
266 {
267 st sp, r0
268 addi sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
269 /*
270 * Use r0 for syscalls so it's a temporary; use r1 for interrupts
271 * so that it gets passed through unchanged to the handler routine.
272 * Note that the .if conditional confusingly spans bundles.
273 */
274 .ifc \processing,handle_syscall
275 movei r0, \vecnum
276 }
277 {
278 st sp, r0
279 .else
280 movei r1, \vecnum
281 }
282 {
283 st sp, r1
284 .endif
285 addli sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
286 }
287 mfspr r0, SPR_SYSTEM_SAVE_K_1 /* Original r0 */
288 {
289 st sp, r0
290 addi sp, sp, -PTREGS_OFFSET_REG(0) - 8
291 }
292 {
293 st sp, zero /* write zero into "Next SP" frame pointer */
294 addi sp, sp, -8 /* leave SP pointing at bottom of frame */
295 }
296 .ifc \processing,handle_syscall
297 j handle_syscall
298 .else
299 /* Capture per-interrupt SPR context to registers. */
300 .ifc \c_routine, do_page_fault
301 mfspr r2, SPR_SYSTEM_SAVE_K_3 /* address of page fault */
302 mfspr r3, SPR_SYSTEM_SAVE_K_2 /* info about page fault */
303 .else
304 .ifc \vecnum, INT_ILL_TRANS
305 mfspr r2, ILL_TRANS_REASON
306 .else
307 .ifc \vecnum, INT_DOUBLE_FAULT
308 mfspr r2, SPR_SYSTEM_SAVE_K_2 /* double fault info from HV */
309 .else
310 .ifc \c_routine, do_trap
311 mfspr r2, GPV_REASON
312 .else
313 .ifc \c_routine, op_handle_perf_interrupt
314 mfspr r2, PERF_COUNT_STS
315#if CHIP_HAS_AUX_PERF_COUNTERS()
316 .else
317 .ifc \c_routine, op_handle_aux_perf_interrupt
318 mfspr r2, AUX_PERF_COUNT_STS
319 .endif
320#endif
321 .endif
322 .endif
323 .endif
324 .endif
325 .endif
326 /* Put function pointer in r0 */
327 moveli r0, hw2_last(\c_routine)
328 shl16insli r0, r0, hw1(\c_routine)
329 {
330 shl16insli r0, r0, hw0(\c_routine)
331 j \processing
332 }
333 .endif
334 ENDPROC(intvec_\vecname)
335
336#ifdef __COLLECT_LINKER_FEEDBACK__
337 .pushsection .text.intvec_feedback,"ax"
338 .org (\vecnum << 5)
339 FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8)
340 jrp lr
341 .popsection
342#endif
343
344 .endm
345
346
347 /*
348 * Save the rest of the registers that we didn't save in the actual
349 * vector itself. We can't use r0-r10 inclusive here.
350 */
351 .macro finish_interrupt_save, function
352
353 /* If it's a syscall, save a proper orig_r0, otherwise just zero. */
354 PTREGS_PTR(r52, PTREGS_OFFSET_ORIG_R0)
355 {
356 .ifc \function,handle_syscall
357 st r52, r0
358 .else
359 st r52, zero
360 .endif
361 PTREGS_PTR(r52, PTREGS_OFFSET_TP)
362 }
363 st r52, tp
364 {
365 mfspr tp, CMPEXCH_VALUE
366 PTREGS_PTR(r52, PTREGS_OFFSET_CMPEXCH)
367 }
368
369 /*
370 * For ordinary syscalls, we save neither caller- nor callee-
371 * save registers, since the syscall invoker doesn't expect the
372 * caller-saves to be saved, and the called kernel functions will
373 * take care of saving the callee-saves for us.
374 *
375 * For interrupts we save just the caller-save registers. Saving
376 * them is required (since the "caller" can't save them). Again,
377 * the called kernel functions will restore the callee-save
378 * registers for us appropriately.
379 *
380 * On return, we normally restore nothing special for syscalls,
381 * and just the caller-save registers for interrupts.
382 *
383 * However, there are some important caveats to all this:
384 *
385 * - We always save a few callee-save registers to give us
386 * some scratchpad registers to carry across function calls.
387 *
388 * - fork/vfork/etc require us to save all the callee-save
389 * registers, which we do in PTREGS_SYSCALL_ALL_REGS, below.
390 *
391 * - We always save r0..r5 and r10 for syscalls, since we need
392 * to reload them a bit later for the actual kernel call, and
393 * since we might need them for -ERESTARTNOINTR, etc.
394 *
395 * - Before invoking a signal handler, we save the unsaved
396 * callee-save registers so they are visible to the
397 * signal handler or any ptracer.
398 *
399 * - If the unsaved callee-save registers are modified, we set
400 * a bit in pt_regs so we know to reload them from pt_regs
401 * and not just rely on the kernel function unwinding.
402 * (Done for ptrace register writes and SA_SIGINFO handler.)
403 */
404 {
405 st r52, tp
406 PTREGS_PTR(r52, PTREGS_OFFSET_REG(33))
407 }
408 wh64 r52 /* cache line 4 */
409 push_reg r33, r52
410 push_reg r32, r52
411 push_reg r31, r52
412 .ifc \function,handle_syscall
413 push_reg r30, r52, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(30)
414 push_reg TREG_SYSCALL_NR_NAME, r52, \
415 PTREGS_OFFSET_REG(5) - PTREGS_OFFSET_SYSCALL
416 .else
417
418 push_reg r30, r52, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(30)
419 wh64 r52 /* cache line 3 */
420 push_reg r29, r52
421 push_reg r28, r52
422 push_reg r27, r52
423 push_reg r26, r52
424 push_reg r25, r52
425 push_reg r24, r52
426 push_reg r23, r52
427 push_reg r22, r52
428 wh64 r52 /* cache line 2 */
429 push_reg r21, r52
430 push_reg r20, r52
431 push_reg r19, r52
432 push_reg r18, r52
433 push_reg r17, r52
434 push_reg r16, r52
435 push_reg r15, r52
436 push_reg r14, r52
437 wh64 r52 /* cache line 1 */
438 push_reg r13, r52
439 push_reg r12, r52
440 push_reg r11, r52
441 push_reg r10, r52
442 push_reg r9, r52
443 push_reg r8, r52
444 push_reg r7, r52
445 push_reg r6, r52
446
447 .endif
448
449 push_reg r5, r52
450 st r52, r4
451
452 /* Load tp with our per-cpu offset. */
453#ifdef CONFIG_SMP
454 {
455 mfspr r20, SPR_SYSTEM_SAVE_K_0
456 moveli r21, hw2_last(__per_cpu_offset)
457 }
458 {
459 shl16insli r21, r21, hw1(__per_cpu_offset)
460 bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
461 }
462 shl16insli r21, r21, hw0(__per_cpu_offset)
463 shl3add r20, r20, r21
464 ld tp, r20
465#else
466 move tp, zero
467#endif
468
469 /*
470 * If we will be returning to the kernel, we will need to
471 * reset the interrupt masks to the state they had before.
472 * Set DISABLE_IRQ in flags iff we came from PL1 with irqs disabled.
473 */
474 mfspr r32, SPR_EX_CONTEXT_K_1
475 {
476 andi r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
477 PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
478 }
479 beqzt r32, 1f /* zero if from user space */
480 IRQS_DISABLED(r32) /* zero if irqs enabled */
481#if PT_FLAGS_DISABLE_IRQ != 1
482# error Value of IRQS_DISABLED used to set PT_FLAGS_DISABLE_IRQ; fix
483#endif
4841:
485 .ifnc \function,handle_syscall
486 /* Record the fact that we saved the caller-save registers above. */
487 ori r32, r32, PT_FLAGS_CALLER_SAVES
488 .endif
489 st r21, r32
490
491#ifdef __COLLECT_LINKER_FEEDBACK__
492 /*
493 * Notify the feedback routines that we were in the
494 * appropriate fixed interrupt vector area. Note that we
495 * still have ICS set at this point, so we can't invoke any
496 * atomic operations or we will panic. The feedback
497 * routines internally preserve r0..r10 and r30 up.
498 */
499 .ifnc \function,handle_syscall
500 shli r20, r1, 5
501 .else
502 moveli r20, INT_SWINT_1 << 5
503 .endif
504 moveli r21, hw2_last(intvec_feedback)
505 shl16insli r21, r21, hw1(intvec_feedback)
506 shl16insli r21, r21, hw0(intvec_feedback)
507 add r20, r20, r21
508 jalr r20
509
510 /* And now notify the feedback routines that we are here. */
511 FEEDBACK_ENTER(\function)
512#endif
513
514 /*
515 * we've captured enough state to the stack (including in
516 * particular our EX_CONTEXT state) that we can now release
517 * the interrupt critical section and replace it with our
518 * standard "interrupts disabled" mask value. This allows
519 * synchronous interrupts (and profile interrupts) to punch
520 * through from this point onwards.
521 */
522 .ifc \function,handle_nmi
523 IRQ_DISABLE_ALL(r20)
524 .else
525 IRQ_DISABLE(r20, r21)
526 .endif
527 mtspr INTERRUPT_CRITICAL_SECTION, zero
528
529 /*
530 * Prepare the first 256 stack bytes to be rapidly accessible
531 * without having to fetch the background data.
532 */
533 addi r52, sp, -64
534 {
535 wh64 r52
536 addi r52, r52, -64
537 }
538 {
539 wh64 r52
540 addi r52, r52, -64
541 }
542 {
543 wh64 r52
544 addi r52, r52, -64
545 }
546 wh64 r52
547
548#ifdef CONFIG_TRACE_IRQFLAGS
549 .ifnc \function,handle_nmi
550 /*
551 * We finally have enough state set up to notify the irq
552 * tracing code that irqs were disabled on entry to the handler.
553 * The TRACE_IRQS_OFF call clobbers registers r0-r29.
554 * For syscalls, we already have the register state saved away
555 * on the stack, so we don't bother to do any register saves here,
556 * and later we pop the registers back off the kernel stack.
557 * For interrupt handlers, save r0-r3 in callee-saved registers.
558 */
559 .ifnc \function,handle_syscall
560 { move r30, r0; move r31, r1 }
561 { move r32, r2; move r33, r3 }
562 .endif
563 TRACE_IRQS_OFF
564 .ifnc \function,handle_syscall
565 { move r0, r30; move r1, r31 }
566 { move r2, r32; move r3, r33 }
567 .endif
568 .endif
569#endif
570
571 .endm
572
573 /*
574 * Redispatch a downcall.
575 */
576 .macro dc_dispatch vecnum, vecname
577 .org (\vecnum << 8)
578intvec_\vecname:
579 j hv_downcall_dispatch
580 ENDPROC(intvec_\vecname)
581 .endm
582
583 /*
584 * Common code for most interrupts. The C function we're eventually
585 * going to is in r0, and the faultnum is in r1; the original
586 * values for those registers are on the stack.
587 */
588 .pushsection .text.handle_interrupt,"ax"
589handle_interrupt:
590 finish_interrupt_save handle_interrupt
591
592 /* Jump to the C routine; it should enable irqs as soon as possible. */
593 {
594 jalr r0
595 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
596 }
597 FEEDBACK_REENTER(handle_interrupt)
598 {
599 movei r30, 0 /* not an NMI */
600 j interrupt_return
601 }
602 STD_ENDPROC(handle_interrupt)
603
604/*
605 * This routine takes a boolean in r30 indicating if this is an NMI.
606 * If so, we also expect a boolean in r31 indicating whether to
607 * re-enable the oprofile interrupts.
608 */
609STD_ENTRY(interrupt_return)
610 /* If we're resuming to kernel space, don't check thread flags. */
611 {
612 bnez r30, .Lrestore_all /* NMIs don't special-case user-space */
613 PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
614 }
615 ld r29, r29
616 andi r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
617 {
618 beqzt r29, .Lresume_userspace
619 PTREGS_PTR(r29, PTREGS_OFFSET_PC)
620 }
621
622 /* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
623 moveli r27, hw2_last(_cpu_idle_nap)
624 {
625 ld r28, r29
626 shl16insli r27, r27, hw1(_cpu_idle_nap)
627 }
628 {
629 shl16insli r27, r27, hw0(_cpu_idle_nap)
630 }
631 {
632 cmpeq r27, r27, r28
633 }
634 {
635 blbc r27, .Lrestore_all
636 addi r28, r28, 8
637 }
638 st r29, r28
639 j .Lrestore_all
640
641.Lresume_userspace:
642 FEEDBACK_REENTER(interrupt_return)
643
644 /*
645 * Disable interrupts so as to make sure we don't
646 * miss an interrupt that sets any of the thread flags (like
647 * need_resched or sigpending) between sampling and the iret.
648 * Routines like schedule() or do_signal() may re-enable
649 * interrupts before returning.
650 */
651 IRQ_DISABLE(r20, r21)
652 TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */
653
654 /* Get base of stack in r32; note r30/31 are used as arguments here. */
655 GET_THREAD_INFO(r32)
656
657
658 /* Check to see if there is any work to do before returning to user. */
659 {
660 addi r29, r32, THREAD_INFO_FLAGS_OFFSET
661 moveli r1, hw1_last(_TIF_ALLWORK_MASK)
662 }
663 {
664 ld r29, r29
665 shl16insli r1, r1, hw0(_TIF_ALLWORK_MASK)
666 }
667 and r1, r29, r1
668 beqzt r1, .Lrestore_all
669
670 /*
671 * Make sure we have all the registers saved for signal
672 * handling or single-step. Call out to C code to figure out
673 * exactly what we need to do for each flag bit, then if
674 * necessary, reload the flags and recheck.
675 */
676 push_extra_callee_saves r0
677 {
678 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
679 jal do_work_pending
680 }
681 bnez r0, .Lresume_userspace
682
683 /*
684 * In the NMI case we
685 * omit the call to single_process_check_nohz, which normally checks
686 * to see if we should start or stop the scheduler tick, because
687 * we can't call arbitrary Linux code from an NMI context.
688 * We always call the homecache TLB deferral code to re-trigger
689 * the deferral mechanism.
690 *
691 * The other chunk of responsibility this code has is to reset the
692 * interrupt masks appropriately to reset irqs and NMIs. We have
693 * to call TRACE_IRQS_OFF and TRACE_IRQS_ON to support all the
694 * lockdep-type stuff, but we can't set ICS until afterwards, since
695 * ICS can only be used in very tight chunks of code to avoid
696 * tripping over various assertions that it is off.
697 */
698.Lrestore_all:
699 PTREGS_PTR(r0, PTREGS_OFFSET_EX1)
700 {
701 ld r0, r0
702 PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS)
703 }
704 {
705 andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK
706 ld r32, r32
707 }
708 bnez r0, 1f
709 j 2f
710#if PT_FLAGS_DISABLE_IRQ != 1
711# error Assuming PT_FLAGS_DISABLE_IRQ == 1 so we can use blbct below
712#endif
7131: blbct r32, 2f
714 IRQ_DISABLE(r20,r21)
715 TRACE_IRQS_OFF
716 movei r0, 1
717 mtspr INTERRUPT_CRITICAL_SECTION, r0
718 beqzt r30, .Lrestore_regs
719 j 3f
7202: TRACE_IRQS_ON
721 movei r0, 1
722 mtspr INTERRUPT_CRITICAL_SECTION, r0
723 IRQ_ENABLE(r20, r21)
724 beqzt r30, .Lrestore_regs
7253:
726
727
728 /*
729 * We now commit to returning from this interrupt, since we will be
730 * doing things like setting EX_CONTEXT SPRs and unwinding the stack
731 * frame. No calls should be made to any other code after this point.
732 * This code should only be entered with ICS set.
733 * r32 must still be set to ptregs.flags.
734 * We launch loads to each cache line separately first, so we can
735 * get some parallelism out of the memory subsystem.
736 * We start zeroing caller-saved registers throughout, since
737 * that will save some cycles if this turns out to be a syscall.
738 */
739.Lrestore_regs:
740 FEEDBACK_REENTER(interrupt_return) /* called from elsewhere */
741
742 /*
743 * Rotate so we have one high bit and one low bit to test.
744 * - low bit says whether to restore all the callee-saved registers,
745 * or just r30-r33, and r52 up.
746 * - high bit (i.e. sign bit) says whether to restore all the
747 * caller-saved registers, or just r0.
748 */
749#if PT_FLAGS_CALLER_SAVES != 2 || PT_FLAGS_RESTORE_REGS != 4
750# error Rotate trick does not work :-)
751#endif
752 {
753 rotli r20, r32, 62
754 PTREGS_PTR(sp, PTREGS_OFFSET_REG(0))
755 }
756
757 /*
758 * Load cache lines 0, 4, 6 and 7, in that order, then use
759 * the last loaded value, which makes it likely that the other
760 * cache lines have also loaded, at which point we should be
761 * able to safely read all the remaining words on those cache
762 * lines without waiting for the memory subsystem.
763 */
764 pop_reg r0, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
765 pop_reg r30, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_REG(30)
766 pop_reg_zero r52, r3, sp, PTREGS_OFFSET_CMPEXCH - PTREGS_OFFSET_REG(52)
767 pop_reg_zero r21, r27, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_CMPEXCH
768 pop_reg_zero lr, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_EX1
769 {
770 mtspr CMPEXCH_VALUE, r21
771 move r4, zero
772 }
773 pop_reg r21, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_PC
774 {
775 mtspr SPR_EX_CONTEXT_K_1, lr
776 andi lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
777 }
778 {
779 mtspr SPR_EX_CONTEXT_K_0, r21
780 move r5, zero
781 }
782
783 /* Restore callee-saveds that we actually use. */
784 pop_reg_zero r31, r6
785 pop_reg_zero r32, r7
786 pop_reg_zero r33, r8, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(33)
787
788 /*
789 * If we modified other callee-saveds, restore them now.
790 * This is rare, but could be via ptrace or signal handler.
791 */
792 {
793 move r9, zero
794 blbs r20, .Lrestore_callees
795 }
796.Lcontinue_restore_regs:
797
798 /* Check if we're returning from a syscall. */
799 {
800 move r10, zero
801 bltzt r20, 1f /* no, so go restore callee-save registers */
802 }
803
804 /*
805 * Check if we're returning to userspace.
806 * Note that if we're not, we don't worry about zeroing everything.
807 */
808 {
809 addli sp, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(29)
810 bnez lr, .Lkernel_return
811 }
812
813 /*
814 * On return from syscall, we've restored r0 from pt_regs, but we
815 * clear the remainder of the caller-saved registers. We could
816 * restore the syscall arguments, but there's not much point,
817 * and it ensures user programs aren't trying to use the
818 * caller-saves if we clear them, as well as avoiding leaking
819 * kernel pointers into userspace.
820 */
821 pop_reg_zero lr, r11, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
822 pop_reg_zero tp, r12, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
823 {
824 ld sp, sp
825 move r13, zero
826 move r14, zero
827 }
828 { move r15, zero; move r16, zero }
829 { move r17, zero; move r18, zero }
830 { move r19, zero; move r20, zero }
831 { move r21, zero; move r22, zero }
832 { move r23, zero; move r24, zero }
833 { move r25, zero; move r26, zero }
834
835 /* Set r1 to errno if we are returning an error, otherwise zero. */
836 {
837 moveli r29, 4096
838 sub r1, zero, r0
839 }
840 {
841 move r28, zero
842 cmpltu r29, r1, r29
843 }
844 {
845 mnz r1, r29, r1
846 move r29, zero
847 }
848 iret
849
850 /*
851 * Not a syscall, so restore caller-saved registers.
852 * First kick off loads for cache lines 1-3, which we're touching
853 * for the first time here.
854 */
855 .align 64
8561: pop_reg r29, sp, PTREGS_OFFSET_REG(21) - PTREGS_OFFSET_REG(29)
857 pop_reg r21, sp, PTREGS_OFFSET_REG(13) - PTREGS_OFFSET_REG(21)
858 pop_reg r13, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(13)
859 pop_reg r1
860 pop_reg r2
861 pop_reg r3
862 pop_reg r4
863 pop_reg r5
864 pop_reg r6
865 pop_reg r7
866 pop_reg r8
867 pop_reg r9
868 pop_reg r10
869 pop_reg r11
870 pop_reg r12, sp, 16
871 /* r13 already restored above */
872 pop_reg r14
873 pop_reg r15
874 pop_reg r16
875 pop_reg r17
876 pop_reg r18
877 pop_reg r19
878 pop_reg r20, sp, 16
879 /* r21 already restored above */
880 pop_reg r22
881 pop_reg r23
882 pop_reg r24
883 pop_reg r25
884 pop_reg r26
885 pop_reg r27
886 pop_reg r28, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(28)
887 /* r29 already restored above */
888 bnez lr, .Lkernel_return
889 pop_reg lr, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
890 pop_reg tp, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
891 ld sp, sp
892 iret
893
894 /*
895 * We can't restore tp when in kernel mode, since a thread might
896 * have migrated from another cpu and brought a stale tp value.
897 */
898.Lkernel_return:
899 pop_reg lr, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
900 ld sp, sp
901 iret
902
903 /* Restore callee-saved registers from r34 to r51. */
904.Lrestore_callees:
905 addli sp, sp, PTREGS_OFFSET_REG(34) - PTREGS_OFFSET_REG(29)
906 pop_reg r34
907 pop_reg r35
908 pop_reg r36
909 pop_reg r37
910 pop_reg r38
911 pop_reg r39
912 pop_reg r40
913 pop_reg r41
914 pop_reg r42
915 pop_reg r43
916 pop_reg r44
917 pop_reg r45
918 pop_reg r46
919 pop_reg r47
920 pop_reg r48
921 pop_reg r49
922 pop_reg r50
923 pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
924 j .Lcontinue_restore_regs
925 STD_ENDPROC(interrupt_return)
926
927 /*
928 * "NMI" interrupts mask ALL interrupts before calling the
929 * handler, and don't check thread flags, etc., on the way
930 * back out. In general, the only things we do here for NMIs
931 * are register save/restore and dataplane kernel-TLB management.
932 * We don't (for example) deal with start/stop of the sched tick.
933 */
934 .pushsection .text.handle_nmi,"ax"
935handle_nmi:
936 finish_interrupt_save handle_nmi
937 {
938 jalr r0
939 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
940 }
941 FEEDBACK_REENTER(handle_nmi)
942 {
943 movei r30, 1
944 move r31, r0
945 }
946 j interrupt_return
947 STD_ENDPROC(handle_nmi)
948
949 /*
950 * Parallel code for syscalls to handle_interrupt.
951 */
952 .pushsection .text.handle_syscall,"ax"
953handle_syscall:
954 finish_interrupt_save handle_syscall
955
956 /* Enable irqs. */
957 TRACE_IRQS_ON
958 IRQ_ENABLE(r20, r21)
959
960 /* Bump the counter for syscalls made on this tile. */
961 moveli r20, hw2_last(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
962 shl16insli r20, r20, hw1(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
963 shl16insli r20, r20, hw0(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
964 add r20, r20, tp
965 ld4s r21, r20
966 addi r21, r21, 1
967 st4 r20, r21
968
969 /* Trace syscalls, if requested. */
970 GET_THREAD_INFO(r31)
971 addi r31, r31, THREAD_INFO_FLAGS_OFFSET
972 ld r30, r31
973 andi r30, r30, _TIF_SYSCALL_TRACE
974 {
975 addi r30, r31, THREAD_INFO_STATUS_OFFSET - THREAD_INFO_FLAGS_OFFSET
976 beqzt r30, .Lrestore_syscall_regs
977 }
978 jal do_syscall_trace
979 FEEDBACK_REENTER(handle_syscall)
980
981 /*
982 * We always reload our registers from the stack at this
983 * point. They might be valid, if we didn't build with
984 * TRACE_IRQFLAGS, and this isn't a dataplane tile, and we're not
985 * doing syscall tracing, but there are enough cases now that it
986 * seems simplest just to do the reload unconditionally.
987 */
988.Lrestore_syscall_regs:
989 {
990 ld r30, r30
991 PTREGS_PTR(r11, PTREGS_OFFSET_REG(0))
992 }
993 pop_reg r0, r11
994 pop_reg r1, r11
995 pop_reg r2, r11
996 pop_reg r3, r11
997 pop_reg r4, r11
998 pop_reg r5, r11, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(5)
999 {
1000 ld TREG_SYSCALL_NR_NAME, r11
1001 moveli r21, __NR_syscalls
1002 }
1003
1004 /* Ensure that the syscall number is within the legal range. */
1005 {
1006 moveli r20, hw2(sys_call_table)
1007 blbs r30, .Lcompat_syscall
1008 }
1009 {
1010 cmpltu r21, TREG_SYSCALL_NR_NAME, r21
1011 shl16insli r20, r20, hw1(sys_call_table)
1012 }
1013 {
1014 blbc r21, .Linvalid_syscall
1015 shl16insli r20, r20, hw0(sys_call_table)
1016 }
1017.Lload_syscall_pointer:
1018 shl3add r20, TREG_SYSCALL_NR_NAME, r20
1019 ld r20, r20
1020
1021 /* Jump to syscall handler. */
1022 jalr r20
1023.Lhandle_syscall_link: /* value of "lr" after "jalr r20" above */
1024
1025 /*
1026 * Write our r0 onto the stack so it gets restored instead
1027 * of whatever the user had there before.
1028 * In compat mode, sign-extend r0 before storing it.
1029 */
1030 {
1031 PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
1032 blbct r30, 1f
1033 }
1034 addxi r0, r0, 0
10351: st r29, r0
1036
1037.Lsyscall_sigreturn_skip:
1038 FEEDBACK_REENTER(handle_syscall)
1039
1040 /* Do syscall trace again, if requested. */
1041 ld r30, r31
1042 andi r30, r30, _TIF_SYSCALL_TRACE
1043 beqzt r30, 1f
1044 jal do_syscall_trace
1045 FEEDBACK_REENTER(handle_syscall)
10461: j .Lresume_userspace /* jump into middle of interrupt_return */
1047
1048.Lcompat_syscall:
1049 /*
1050 * Load the base of the compat syscall table in r20, and
1051 * range-check the syscall number (duplicated from 64-bit path).
1052 * Sign-extend all the user's passed arguments to make them consistent.
1053 * Also save the original "r(n)" values away in "r(11+n)" in
1054 * case the syscall table entry wants to validate them.
1055 */
1056 moveli r20, hw2(compat_sys_call_table)
1057 {
1058 cmpltu r21, TREG_SYSCALL_NR_NAME, r21
1059 shl16insli r20, r20, hw1(compat_sys_call_table)
1060 }
1061 {
1062 blbc r21, .Linvalid_syscall
1063 shl16insli r20, r20, hw0(compat_sys_call_table)
1064 }
1065 { move r11, r0; addxi r0, r0, 0 }
1066 { move r12, r1; addxi r1, r1, 0 }
1067 { move r13, r2; addxi r2, r2, 0 }
1068 { move r14, r3; addxi r3, r3, 0 }
1069 { move r15, r4; addxi r4, r4, 0 }
1070 { move r16, r5; addxi r5, r5, 0 }
1071 j .Lload_syscall_pointer
1072
1073.Linvalid_syscall:
1074 /* Report an invalid syscall back to the user program */
1075 {
1076 PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
1077 movei r28, -ENOSYS
1078 }
1079 st r29, r28
1080 j .Lresume_userspace /* jump into middle of interrupt_return */
1081 STD_ENDPROC(handle_syscall)
1082
1083 /* Return the address for oprofile to suppress in backtraces. */
1084STD_ENTRY_SECTION(handle_syscall_link_address, .text.handle_syscall)
1085 lnk r0
1086 {
1087 addli r0, r0, .Lhandle_syscall_link - .
1088 jrp lr
1089 }
1090 STD_ENDPROC(handle_syscall_link_address)
1091
1092STD_ENTRY(ret_from_fork)
1093 jal sim_notify_fork
1094 jal schedule_tail
1095 FEEDBACK_REENTER(ret_from_fork)
1096 j .Lresume_userspace
1097 STD_ENDPROC(ret_from_fork)
1098
1099/* Various stub interrupt handlers and syscall handlers */
1100
1101STD_ENTRY_LOCAL(_kernel_double_fault)
1102 mfspr r1, SPR_EX_CONTEXT_K_0
1103 move r2, lr
1104 move r3, sp
1105 move r4, r52
1106 addi sp, sp, -C_ABI_SAVE_AREA_SIZE
1107 j kernel_double_fault
1108 STD_ENDPROC(_kernel_double_fault)
1109
1110STD_ENTRY_LOCAL(bad_intr)
1111 mfspr r2, SPR_EX_CONTEXT_K_0
1112 panic "Unhandled interrupt %#x: PC %#lx"
1113 STD_ENDPROC(bad_intr)
1114
1115/* Put address of pt_regs in reg and jump. */
1116#define PTREGS_SYSCALL(x, reg) \
1117 STD_ENTRY(_##x); \
1118 { \
1119 PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \
1120 j x \
1121 }; \
1122 STD_ENDPROC(_##x)
1123
1124/*
1125 * Special-case sigreturn to not write r0 to the stack on return.
1126 * This is technically more efficient, but it also avoids difficulties
1127 * in the 64-bit OS when handling 32-bit compat code, since we must not
1128 * sign-extend r0 for the sigreturn return-value case.
1129 */
1130#define PTREGS_SYSCALL_SIGRETURN(x, reg) \
1131 STD_ENTRY(_##x); \
1132 addli lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
1133 { \
1134 PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \
1135 j x \
1136 }; \
1137 STD_ENDPROC(_##x)
1138
1139PTREGS_SYSCALL(sys_execve, r3)
1140PTREGS_SYSCALL(sys_sigaltstack, r2)
1141PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)
1142#ifdef CONFIG_COMPAT
1143PTREGS_SYSCALL(compat_sys_execve, r3)
1144PTREGS_SYSCALL(compat_sys_sigaltstack, r2)
1145PTREGS_SYSCALL_SIGRETURN(compat_sys_rt_sigreturn, r0)
1146#endif
1147
1148/* Save additional callee-saves to pt_regs, put address in r4 and jump. */
1149STD_ENTRY(_sys_clone)
1150 push_extra_callee_saves r4
1151 j sys_clone
1152 STD_ENDPROC(_sys_clone)
1153
1154/* The single-step support may need to read all the registers. */
1155int_unalign:
1156 push_extra_callee_saves r0
1157 j do_trap
1158
1159/* Include .intrpt1 array of interrupt vectors */
1160 .section ".intrpt1", "ax"
1161
1162#define op_handle_perf_interrupt bad_intr
1163#define op_handle_aux_perf_interrupt bad_intr
1164
1165#ifndef CONFIG_HARDWALL
1166#define do_hardwall_trap bad_intr
1167#endif
1168
1169 int_hand INT_MEM_ERROR, MEM_ERROR, bad_intr
1170 int_hand INT_SINGLE_STEP_3, SINGLE_STEP_3, bad_intr
1171#if CONFIG_KERNEL_PL == 2
1172 int_hand INT_SINGLE_STEP_2, SINGLE_STEP_2, gx_singlestep_handle
1173 int_hand INT_SINGLE_STEP_1, SINGLE_STEP_1, bad_intr
1174#else
1175 int_hand INT_SINGLE_STEP_2, SINGLE_STEP_2, bad_intr
1176 int_hand INT_SINGLE_STEP_1, SINGLE_STEP_1, gx_singlestep_handle
1177#endif
1178 int_hand INT_SINGLE_STEP_0, SINGLE_STEP_0, bad_intr
1179 int_hand INT_IDN_COMPLETE, IDN_COMPLETE, bad_intr
1180 int_hand INT_UDN_COMPLETE, UDN_COMPLETE, bad_intr
1181 int_hand INT_ITLB_MISS, ITLB_MISS, do_page_fault
1182 int_hand INT_ILL, ILL, do_trap
1183 int_hand INT_GPV, GPV, do_trap
1184 int_hand INT_IDN_ACCESS, IDN_ACCESS, do_trap
1185 int_hand INT_UDN_ACCESS, UDN_ACCESS, do_trap
1186 int_hand INT_SWINT_3, SWINT_3, do_trap
1187 int_hand INT_SWINT_2, SWINT_2, do_trap
1188 int_hand INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
1189 int_hand INT_SWINT_0, SWINT_0, do_trap
1190 int_hand INT_ILL_TRANS, ILL_TRANS, do_trap
1191 int_hand INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
1192 int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault
1193 int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
1194 int_hand INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr
1195 int_hand INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
1196 int_hand INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
1197 int_hand INT_IDN_TIMER, IDN_TIMER, bad_intr
1198 int_hand INT_UDN_TIMER, UDN_TIMER, bad_intr
1199 int_hand INT_IDN_AVAIL, IDN_AVAIL, bad_intr
1200 int_hand INT_UDN_AVAIL, UDN_AVAIL, bad_intr
1201 int_hand INT_IPI_3, IPI_3, bad_intr
1202#if CONFIG_KERNEL_PL == 2
1203 int_hand INT_IPI_2, IPI_2, tile_dev_intr
1204 int_hand INT_IPI_1, IPI_1, bad_intr
1205#else
1206 int_hand INT_IPI_2, IPI_2, bad_intr
1207 int_hand INT_IPI_1, IPI_1, tile_dev_intr
1208#endif
1209 int_hand INT_IPI_0, IPI_0, bad_intr
1210 int_hand INT_PERF_COUNT, PERF_COUNT, \
1211 op_handle_perf_interrupt, handle_nmi
1212 int_hand INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
1213 op_handle_perf_interrupt, handle_nmi
1214 int_hand INT_INTCTRL_3, INTCTRL_3, bad_intr
1215#if CONFIG_KERNEL_PL == 2
1216 dc_dispatch INT_INTCTRL_2, INTCTRL_2
1217 int_hand INT_INTCTRL_1, INTCTRL_1, bad_intr
1218#else
1219 int_hand INT_INTCTRL_2, INTCTRL_2, bad_intr
1220 dc_dispatch INT_INTCTRL_1, INTCTRL_1
1221#endif
1222 int_hand INT_INTCTRL_0, INTCTRL_0, bad_intr
1223 int_hand INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
1224 hv_message_intr
1225 int_hand INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, bad_intr
1226 int_hand INT_I_ASID, I_ASID, bad_intr
1227 int_hand INT_D_ASID, D_ASID, bad_intr
1228 int_hand INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
1229
1230 /* Synthetic interrupt delivered only by the simulator */
1231 int_hand INT_BREAKPOINT, BREAKPOINT, do_breakpoint
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index e2ab82b7c7e7..f68df69f1f67 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -22,6 +22,7 @@
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <asm/opcode-tile.h> 23#include <asm/opcode-tile.h>
24#include <asm/pgtable.h> 24#include <asm/pgtable.h>
25#include <asm/homecache.h>
25 26
26#ifdef __tilegx__ 27#ifdef __tilegx__
27# define Elf_Rela Elf64_Rela 28# define Elf_Rela Elf64_Rela
@@ -86,8 +87,13 @@ error:
86void module_free(struct module *mod, void *module_region) 87void module_free(struct module *mod, void *module_region)
87{ 88{
88 vfree(module_region); 89 vfree(module_region);
90
91 /* Globally flush the L1 icache. */
92 flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
93 0, 0, 0, NULL, NULL, 0);
94
89 /* 95 /*
90 * FIXME: If module_region == mod->init_region, trim exception 96 * FIXME: If module_region == mod->module_init, trim exception
91 * table entries. 97 * table entries.
92 */ 98 */
93} 99}
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 658752b2835e..658f2ce426a4 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(dma_sync_single_range_for_device);
244 * dma_alloc_noncoherent() returns non-cacheable memory, so there's no 244 * dma_alloc_noncoherent() returns non-cacheable memory, so there's no
245 * need to do any flushing here. 245 * need to do any flushing here.
246 */ 246 */
247void dma_cache_sync(void *vaddr, size_t size, 247void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
248 enum dma_data_direction direction) 248 enum dma_data_direction direction)
249{ 249{
250} 250}
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index ea38f0c9ec7c..6d4cb5d7a9fd 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved. 2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
@@ -59,6 +59,7 @@ int __write_once tile_plx_gen1;
59 59
60static struct pci_controller controllers[TILE_NUM_PCIE]; 60static struct pci_controller controllers[TILE_NUM_PCIE];
61static int num_controllers; 61static int num_controllers;
62static int pci_scan_flags[TILE_NUM_PCIE];
62 63
63static struct pci_ops tile_cfg_ops; 64static struct pci_ops tile_cfg_ops;
64 65
@@ -79,7 +80,7 @@ EXPORT_SYMBOL(pcibios_align_resource);
79 * controller_id is the controller number, config type is 0 or 1 for 80 * controller_id is the controller number, config type is 0 or 1 for
80 * config0 or config1 operations. 81 * config0 or config1 operations.
81 */ 82 */
82static int __init tile_pcie_open(int controller_id, int config_type) 83static int __devinit tile_pcie_open(int controller_id, int config_type)
83{ 84{
84 char filename[32]; 85 char filename[32];
85 int fd; 86 int fd;
@@ -95,7 +96,7 @@ static int __init tile_pcie_open(int controller_id, int config_type)
95/* 96/*
96 * Get the IRQ numbers from the HV and set up the handlers for them. 97 * Get the IRQ numbers from the HV and set up the handlers for them.
97 */ 98 */
98static int __init tile_init_irqs(int controller_id, 99static int __devinit tile_init_irqs(int controller_id,
99 struct pci_controller *controller) 100 struct pci_controller *controller)
100{ 101{
101 char filename[32]; 102 char filename[32];
@@ -139,71 +140,74 @@ static int __init tile_init_irqs(int controller_id,
139 * 140 *
140 * Returns the number of controllers discovered. 141 * Returns the number of controllers discovered.
141 */ 142 */
142int __init tile_pci_init(void) 143int __devinit tile_pci_init(void)
143{ 144{
144 int i; 145 int i;
145 146
146 pr_info("PCI: Searching for controllers...\n"); 147 pr_info("PCI: Searching for controllers...\n");
147 148
149 /* Re-init number of PCIe controllers to support hot-plug feature. */
150 num_controllers = 0;
151
148 /* Do any configuration we need before using the PCIe */ 152 /* Do any configuration we need before using the PCIe */
149 153
150 for (i = 0; i < TILE_NUM_PCIE; i++) { 154 for (i = 0; i < TILE_NUM_PCIE; i++) {
151 int hv_cfg_fd0 = -1;
152 int hv_cfg_fd1 = -1;
153 int hv_mem_fd = -1;
154 char name[32];
155 struct pci_controller *controller;
156
157 /* 155 /*
158 * Open the fd to the HV. If it fails then this 156 * To see whether we need a real config op based on
159 * device doesn't exist. 157 * the results of pcibios_init(), to support PCIe hot-plug.
160 */ 158 */
161 hv_cfg_fd0 = tile_pcie_open(i, 0); 159 if (pci_scan_flags[i] == 0) {
162 if (hv_cfg_fd0 < 0) 160 int hv_cfg_fd0 = -1;
163 continue; 161 int hv_cfg_fd1 = -1;
164 hv_cfg_fd1 = tile_pcie_open(i, 1); 162 int hv_mem_fd = -1;
165 if (hv_cfg_fd1 < 0) { 163 char name[32];
166 pr_err("PCI: Couldn't open config fd to HV " 164 struct pci_controller *controller;
167 "for controller %d\n", i); 165
168 goto err_cont; 166 /*
169 } 167 * Open the fd to the HV. If it fails then this
170 168 * device doesn't exist.
171 sprintf(name, "pcie/%d/mem", i); 169 */
172 hv_mem_fd = hv_dev_open((HV_VirtAddr)name, 0); 170 hv_cfg_fd0 = tile_pcie_open(i, 0);
173 if (hv_mem_fd < 0) { 171 if (hv_cfg_fd0 < 0)
174 pr_err("PCI: Could not open mem fd to HV!\n"); 172 continue;
175 goto err_cont; 173 hv_cfg_fd1 = tile_pcie_open(i, 1);
176 } 174 if (hv_cfg_fd1 < 0) {
175 pr_err("PCI: Couldn't open config fd to HV "
176 "for controller %d\n", i);
177 goto err_cont;
178 }
177 179
178 pr_info("PCI: Found PCI controller #%d\n", i); 180 sprintf(name, "pcie/%d/mem", i);
181 hv_mem_fd = hv_dev_open((HV_VirtAddr)name, 0);
182 if (hv_mem_fd < 0) {
183 pr_err("PCI: Could not open mem fd to HV!\n");
184 goto err_cont;
185 }
179 186
180 controller = &controllers[num_controllers]; 187 pr_info("PCI: Found PCI controller #%d\n", i);
181 188
182 if (tile_init_irqs(i, controller)) { 189 controller = &controllers[i];
183 pr_err("PCI: Could not initialize "
184 "IRQs, aborting.\n");
185 goto err_cont;
186 }
187 190
188 controller->index = num_controllers; 191 controller->index = i;
189 controller->hv_cfg_fd[0] = hv_cfg_fd0; 192 controller->hv_cfg_fd[0] = hv_cfg_fd0;
190 controller->hv_cfg_fd[1] = hv_cfg_fd1; 193 controller->hv_cfg_fd[1] = hv_cfg_fd1;
191 controller->hv_mem_fd = hv_mem_fd; 194 controller->hv_mem_fd = hv_mem_fd;
192 controller->first_busno = 0; 195 controller->first_busno = 0;
193 controller->last_busno = 0xff; 196 controller->last_busno = 0xff;
194 controller->ops = &tile_cfg_ops; 197 controller->ops = &tile_cfg_ops;
195 198
196 num_controllers++; 199 num_controllers++;
197 continue; 200 continue;
198 201
199err_cont: 202err_cont:
200 if (hv_cfg_fd0 >= 0) 203 if (hv_cfg_fd0 >= 0)
201 hv_dev_close(hv_cfg_fd0); 204 hv_dev_close(hv_cfg_fd0);
202 if (hv_cfg_fd1 >= 0) 205 if (hv_cfg_fd1 >= 0)
203 hv_dev_close(hv_cfg_fd1); 206 hv_dev_close(hv_cfg_fd1);
204 if (hv_mem_fd >= 0) 207 if (hv_mem_fd >= 0)
205 hv_dev_close(hv_mem_fd); 208 hv_dev_close(hv_mem_fd);
206 continue; 209 continue;
210 }
207 } 211 }
208 212
209 /* 213 /*
@@ -232,7 +236,7 @@ static int tile_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
232} 236}
233 237
234 238
235static void __init fixup_read_and_payload_sizes(void) 239static void __devinit fixup_read_and_payload_sizes(void)
236{ 240{
237 struct pci_dev *dev = NULL; 241 struct pci_dev *dev = NULL;
238 int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */ 242 int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */
@@ -282,7 +286,7 @@ static void __init fixup_read_and_payload_sizes(void)
282 * The controllers have been set up by the time we get here, by a call to 286 * The controllers have been set up by the time we get here, by a call to
283 * tile_pci_init. 287 * tile_pci_init.
284 */ 288 */
285static int __init pcibios_init(void) 289int __devinit pcibios_init(void)
286{ 290{
287 int i; 291 int i;
288 292
@@ -296,25 +300,36 @@ static int __init pcibios_init(void)
296 mdelay(250); 300 mdelay(250);
297 301
298 /* Scan all of the recorded PCI controllers. */ 302 /* Scan all of the recorded PCI controllers. */
299 for (i = 0; i < num_controllers; i++) { 303 for (i = 0; i < TILE_NUM_PCIE; i++) {
300 struct pci_controller *controller = &controllers[i];
301 struct pci_bus *bus;
302
303 pr_info("PCI: initializing controller #%d\n", i);
304
305 /* 304 /*
306 * This comes from the generic Linux PCI driver. 305 * Do real pcibios init ops if the controller is initialized
307 * 306 * by tile_pci_init() successfully and not initialized by
308 * It reads the PCI tree for this bus into the Linux 307 * pcibios_init() yet to support PCIe hot-plug.
309 * data structures.
310 *
311 * This is inlined in linux/pci.h and calls into
312 * pci_scan_bus_parented() in probe.c.
313 */ 308 */
314 bus = pci_scan_bus(0, controller->ops, controller); 309 if (pci_scan_flags[i] == 0 && controllers[i].ops != NULL) {
315 controller->root_bus = bus; 310 struct pci_controller *controller = &controllers[i];
316 controller->last_busno = bus->subordinate; 311 struct pci_bus *bus;
317 312
313 if (tile_init_irqs(i, controller)) {
314 pr_err("PCI: Could not initialize IRQs\n");
315 continue;
316 }
317
318 pr_info("PCI: initializing controller #%d\n", i);
319
320 /*
321 * This comes from the generic Linux PCI driver.
322 *
323 * It reads the PCI tree for this bus into the Linux
324 * data structures.
325 *
326 * This is inlined in linux/pci.h and calls into
327 * pci_scan_bus_parented() in probe.c.
328 */
329 bus = pci_scan_bus(0, controller->ops, controller);
330 controller->root_bus = bus;
331 controller->last_busno = bus->subordinate;
332 }
318 } 333 }
319 334
320 /* Do machine dependent PCI interrupt routing */ 335 /* Do machine dependent PCI interrupt routing */
@@ -326,34 +341,45 @@ static int __init pcibios_init(void)
326 * It allocates all of the resources (I/O memory, etc) 341 * It allocates all of the resources (I/O memory, etc)
327 * associated with the devices read in above. 342 * associated with the devices read in above.
328 */ 343 */
329
330 pci_assign_unassigned_resources(); 344 pci_assign_unassigned_resources();
331 345
332 /* Configure the max_read_size and max_payload_size values. */ 346 /* Configure the max_read_size and max_payload_size values. */
333 fixup_read_and_payload_sizes(); 347 fixup_read_and_payload_sizes();
334 348
335 /* Record the I/O resources in the PCI controller structure. */ 349 /* Record the I/O resources in the PCI controller structure. */
336 for (i = 0; i < num_controllers; i++) { 350 for (i = 0; i < TILE_NUM_PCIE; i++) {
337 struct pci_bus *root_bus = controllers[i].root_bus; 351 /*
338 struct pci_bus *next_bus; 352 * Do real pcibios init ops if the controller is initialized
339 struct pci_dev *dev; 353 * by tile_pci_init() successfully and not initialized by
340 354 * pcibios_init() yet to support PCIe hot-plug.
341 list_for_each_entry(dev, &root_bus->devices, bus_list) { 355 */
342 /* Find the PCI host controller, ie. the 1st bridge. */ 356 if (pci_scan_flags[i] == 0 && controllers[i].ops != NULL) {
343 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && 357 struct pci_bus *root_bus = controllers[i].root_bus;
344 (PCI_SLOT(dev->devfn) == 0)) { 358 struct pci_bus *next_bus;
345 next_bus = dev->subordinate; 359 struct pci_dev *dev;
346 controllers[i].mem_resources[0] = 360
347 *next_bus->resource[0]; 361 list_for_each_entry(dev, &root_bus->devices, bus_list) {
348 controllers[i].mem_resources[1] = 362 /*
349 *next_bus->resource[1]; 363 * Find the PCI host controller, ie. the 1st
350 controllers[i].mem_resources[2] = 364 * bridge.
351 *next_bus->resource[2]; 365 */
352 366 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
353 break; 367 (PCI_SLOT(dev->devfn) == 0)) {
368 next_bus = dev->subordinate;
369 controllers[i].mem_resources[0] =
370 *next_bus->resource[0];
371 controllers[i].mem_resources[1] =
372 *next_bus->resource[1];
373 controllers[i].mem_resources[2] =
374 *next_bus->resource[2];
375
376 /* Setup flags. */
377 pci_scan_flags[i] = 1;
378
379 break;
380 }
354 } 381 }
355 } 382 }
356
357 } 383 }
358 384
359 return 0; 385 return 0;
@@ -381,7 +407,7 @@ char __devinit *pcibios_setup(char *str)
381/* 407/*
382 * This is called from the generic Linux layer. 408 * This is called from the generic Linux layer.
383 */ 409 */
384void __init pcibios_update_irq(struct pci_dev *dev, int irq) 410void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
385{ 411{
386 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); 412 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
387} 413}
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index d0065103eb7b..9c45d8bbdf57 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -25,10 +25,13 @@
25#include <linux/hardirq.h> 25#include <linux/hardirq.h>
26#include <linux/syscalls.h> 26#include <linux/syscalls.h>
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/tracehook.h>
29#include <linux/signal.h>
28#include <asm/system.h> 30#include <asm/system.h>
29#include <asm/stack.h> 31#include <asm/stack.h>
30#include <asm/homecache.h> 32#include <asm/homecache.h>
31#include <asm/syscalls.h> 33#include <asm/syscalls.h>
34#include <asm/traps.h>
32#ifdef CONFIG_HARDWALL 35#ifdef CONFIG_HARDWALL
33#include <asm/hardwall.h> 36#include <asm/hardwall.h>
34#endif 37#endif
@@ -546,6 +549,51 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
546 return __switch_to(prev, next, next_current_ksp0(next)); 549 return __switch_to(prev, next, next_current_ksp0(next));
547} 550}
548 551
552/*
553 * This routine is called on return from interrupt if any of the
554 * TIF_WORK_MASK flags are set in thread_info->flags. It is
555 * entered with interrupts disabled so we don't miss an event
556 * that modified the thread_info flags. If any flag is set, we
557 * handle it and return, and the calling assembly code will
558 * re-disable interrupts, reload the thread flags, and call back
559 * if more flags need to be handled.
560 *
561 * We return whether we need to check the thread_info flags again
562 * or not. Note that we don't clear TIF_SINGLESTEP here, so it's
563 * important that it be tested last, and then claim that we don't
564 * need to recheck the flags.
565 */
566int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
567{
568 if (thread_info_flags & _TIF_NEED_RESCHED) {
569 schedule();
570 return 1;
571 }
572#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
573 if (thread_info_flags & _TIF_ASYNC_TLB) {
574 do_async_page_fault(regs);
575 return 1;
576 }
577#endif
578 if (thread_info_flags & _TIF_SIGPENDING) {
579 do_signal(regs);
580 return 1;
581 }
582 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
583 clear_thread_flag(TIF_NOTIFY_RESUME);
584 tracehook_notify_resume(regs);
585 if (current->replacement_session_keyring)
586 key_replace_session_keyring();
587 return 1;
588 }
589 if (thread_info_flags & _TIF_SINGLESTEP) {
590 if ((regs->ex1 & SPR_EX_CONTEXT_1_1__PL_MASK) == 0)
591 single_step_once(regs);
592 return 0;
593 }
594 panic("work_pending: bad flags %#x\n", thread_info_flags);
595}
596
549/* Note there is an implicit fifth argument if (clone_flags & CLONE_SETTLS). */ 597/* Note there is an implicit fifth argument if (clone_flags & CLONE_SETTLS). */
550SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, 598SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
551 void __user *, parent_tidptr, void __user *, child_tidptr, 599 void __user *, parent_tidptr, void __user *, child_tidptr,
@@ -582,8 +630,8 @@ out:
582 630
583#ifdef CONFIG_COMPAT 631#ifdef CONFIG_COMPAT
584long compat_sys_execve(const char __user *path, 632long compat_sys_execve(const char __user *path,
585 const compat_uptr_t __user *argv, 633 compat_uptr_t __user *argv,
586 const compat_uptr_t __user *envp, 634 compat_uptr_t __user *envp,
587 struct pt_regs *regs) 635 struct pt_regs *regs)
588{ 636{
589 long error; 637 long error;
diff --git a/arch/tile/kernel/regs_64.S b/arch/tile/kernel/regs_64.S
new file mode 100644
index 000000000000..f748c1e85285
--- /dev/null
+++ b/arch/tile/kernel/regs_64.S
@@ -0,0 +1,145 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/linkage.h>
16#include <asm/system.h>
17#include <asm/ptrace.h>
18#include <asm/asm-offsets.h>
19#include <arch/spr_def.h>
20#include <asm/processor.h>
21
22/*
23 * See <asm/system.h>; called with prev and next task_struct pointers.
24 * "prev" is returned in r0 for _switch_to and also for ret_from_fork.
25 *
26 * We want to save pc/sp in "prev", and get the new pc/sp from "next".
27 * We also need to save all the callee-saved registers on the stack.
28 *
29 * Intel enables/disables access to the hardware cycle counter in
30 * seccomp (secure computing) environments if necessary, based on
31 * has_secure_computing(). We might want to do this at some point,
32 * though it would require virtualizing the other SPRs under WORLD_ACCESS.
33 *
34 * Since we're saving to the stack, we omit sp from this list.
35 * And for parallels with other architectures, we save lr separately,
36 * in the thread_struct itself (as the "pc" field).
37 *
38 * This code also needs to be aligned with process.c copy_thread()
39 */
40
41#if CALLEE_SAVED_REGS_COUNT != 24
42# error Mismatch between <asm/system.h> and kernel/entry.S
43#endif
44#define FRAME_SIZE ((2 + CALLEE_SAVED_REGS_COUNT) * 8)
45
46#define SAVE_REG(r) { st r12, r; addi r12, r12, 8 }
47#define LOAD_REG(r) { ld r, r12; addi r12, r12, 8 }
48#define FOR_EACH_CALLEE_SAVED_REG(f) \
49 f(r30); f(r31); \
50 f(r32); f(r33); f(r34); f(r35); f(r36); f(r37); f(r38); f(r39); \
51 f(r40); f(r41); f(r42); f(r43); f(r44); f(r45); f(r46); f(r47); \
52 f(r48); f(r49); f(r50); f(r51); f(r52);
53
54STD_ENTRY_SECTION(__switch_to, .sched.text)
55 {
56 move r10, sp
57 st sp, lr
58 }
59 {
60 addli r11, sp, -FRAME_SIZE + 8
61 addli sp, sp, -FRAME_SIZE
62 }
63 {
64 st r11, r10
65 addli r4, r1, TASK_STRUCT_THREAD_KSP_OFFSET
66 }
67 {
68 ld r13, r4 /* Load new sp to a temp register early. */
69 addi r12, sp, 16
70 }
71 FOR_EACH_CALLEE_SAVED_REG(SAVE_REG)
72 addli r3, r0, TASK_STRUCT_THREAD_KSP_OFFSET
73 {
74 st r3, sp
75 addli r3, r0, TASK_STRUCT_THREAD_PC_OFFSET
76 }
77 {
78 st r3, lr
79 addli r4, r1, TASK_STRUCT_THREAD_PC_OFFSET
80 }
81 {
82 ld lr, r4
83 addi r12, r13, 16
84 }
85 {
86 /* Update sp and ksp0 simultaneously to avoid backtracer warnings. */
87 move sp, r13
88 mtspr SPR_SYSTEM_SAVE_K_0, r2
89 }
90 FOR_EACH_CALLEE_SAVED_REG(LOAD_REG)
91.L__switch_to_pc:
92 {
93 addli sp, sp, FRAME_SIZE
94 jrp lr /* r0 is still valid here, so return it */
95 }
96 STD_ENDPROC(__switch_to)
97
98/* Return a suitable address for the backtracer for suspended threads */
99STD_ENTRY_SECTION(get_switch_to_pc, .sched.text)
100 lnk r0
101 {
102 addli r0, r0, .L__switch_to_pc - .
103 jrp lr
104 }
105 STD_ENDPROC(get_switch_to_pc)
106
107STD_ENTRY(get_pt_regs)
108 .irp reg, r0, r1, r2, r3, r4, r5, r6, r7, \
109 r8, r9, r10, r11, r12, r13, r14, r15, \
110 r16, r17, r18, r19, r20, r21, r22, r23, \
111 r24, r25, r26, r27, r28, r29, r30, r31, \
112 r32, r33, r34, r35, r36, r37, r38, r39, \
113 r40, r41, r42, r43, r44, r45, r46, r47, \
114 r48, r49, r50, r51, r52, tp, sp
115 {
116 st r0, \reg
117 addi r0, r0, 8
118 }
119 .endr
120 {
121 st r0, lr
122 addi r0, r0, PTREGS_OFFSET_PC - PTREGS_OFFSET_LR
123 }
124 lnk r1
125 {
126 st r0, r1
127 addi r0, r0, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
128 }
129 mfspr r1, INTERRUPT_CRITICAL_SECTION
130 shli r1, r1, SPR_EX_CONTEXT_1_1__ICS_SHIFT
131 ori r1, r1, KERNEL_PL
132 {
133 st r0, r1
134 addi r0, r0, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
135 }
136 {
137 st r0, zero /* clear faultnum */
138 addi r0, r0, PTREGS_OFFSET_ORIG_R0 - PTREGS_OFFSET_FAULTNUM
139 }
140 {
141 st r0, zero /* clear orig_r0 */
142 addli r0, r0, -PTREGS_OFFSET_ORIG_R0 /* restore r0 to base */
143 }
144 jrp lr
145 STD_ENDPROC(get_pt_regs)
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 3696b1832566..6cdc9ba55fe0 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -912,6 +912,8 @@ void __cpuinit setup_cpu(int boot)
912#endif 912#endif
913} 913}
914 914
915#ifdef CONFIG_BLK_DEV_INITRD
916
915static int __initdata set_initramfs_file; 917static int __initdata set_initramfs_file;
916static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; 918static char __initdata initramfs_file[128] = "initramfs.cpio.gz";
917 919
@@ -969,6 +971,10 @@ void __init free_initrd_mem(unsigned long begin, unsigned long end)
969 free_bootmem(__pa(begin), end - begin); 971 free_bootmem(__pa(begin), end - begin);
970} 972}
971 973
974#else
975static inline void load_hv_initrd(void) {}
976#endif /* CONFIG_BLK_DEV_INITRD */
977
972static void __init validate_hv(void) 978static void __init validate_hv(void)
973{ 979{
974 /* 980 /*
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index 1260321155f1..bedaf4e9f3a7 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -39,7 +39,6 @@
39 39
40#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 40#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
41 41
42
43SYSCALL_DEFINE3(sigaltstack, const stack_t __user *, uss, 42SYSCALL_DEFINE3(sigaltstack, const stack_t __user *, uss,
44 stack_t __user *, uoss, struct pt_regs *, regs) 43 stack_t __user *, uoss, struct pt_regs *, regs)
45{ 44{
@@ -78,6 +77,13 @@ int restore_sigcontext(struct pt_regs *regs,
78 return err; 77 return err;
79} 78}
80 79
80void signal_fault(const char *type, struct pt_regs *regs,
81 void __user *frame, int sig)
82{
83 trace_unhandled_signal(type, regs, (unsigned long)frame, SIGSEGV);
84 force_sigsegv(sig, current);
85}
86
81/* The assembly shim for this function arranges to ignore the return value. */ 87/* The assembly shim for this function arranges to ignore the return value. */
82SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs) 88SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
83{ 89{
@@ -105,7 +111,7 @@ SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
105 return 0; 111 return 0;
106 112
107badframe: 113badframe:
108 force_sig(SIGSEGV, current); 114 signal_fault("bad sigreturn frame", regs, frame, 0);
109 return 0; 115 return 0;
110} 116}
111 117
@@ -231,7 +237,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
231 return 0; 237 return 0;
232 238
233give_sigsegv: 239give_sigsegv:
234 force_sigsegv(sig, current); 240 signal_fault("bad setup frame", regs, frame, sig);
235 return -EFAULT; 241 return -EFAULT;
236} 242}
237 243
@@ -245,7 +251,6 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
245{ 251{
246 int ret; 252 int ret;
247 253
248
249 /* Are we from a system call? */ 254 /* Are we from a system call? */
250 if (regs->faultnum == INT_SWINT_1) { 255 if (regs->faultnum == INT_SWINT_1) {
251 /* If so, check system call restarting.. */ 256 /* If so, check system call restarting.. */
@@ -363,3 +368,118 @@ done:
363 /* Avoid double syscall restart if there are nested signals. */ 368 /* Avoid double syscall restart if there are nested signals. */
364 regs->faultnum = INT_SWINT_1_SIGRETURN; 369 regs->faultnum = INT_SWINT_1_SIGRETURN;
365} 370}
371
372int show_unhandled_signals = 1;
373
374static int __init crashinfo(char *str)
375{
376 unsigned long val;
377 const char *word;
378
379 if (*str == '\0')
380 val = 2;
381 else if (*str != '=' || strict_strtoul(++str, 0, &val) != 0)
382 return 0;
383 show_unhandled_signals = val;
384 switch (show_unhandled_signals) {
385 case 0:
386 word = "No";
387 break;
388 case 1:
389 word = "One-line";
390 break;
391 default:
392 word = "Detailed";
393 break;
394 }
395 pr_info("%s crash reports will be generated on the console\n", word);
396 return 1;
397}
398__setup("crashinfo", crashinfo);
399
400static void dump_mem(void __user *address)
401{
402 void __user *addr;
403 enum { region_size = 256, bytes_per_line = 16 };
404 int i, j, k;
405 int found_readable_mem = 0;
406
407 pr_err("\n");
408 if (!access_ok(VERIFY_READ, address, 1)) {
409 pr_err("Not dumping at address 0x%lx (kernel address)\n",
410 (unsigned long)address);
411 return;
412 }
413
414 addr = (void __user *)
415 (((unsigned long)address & -bytes_per_line) - region_size/2);
416 if (addr > address)
417 addr = NULL;
418 for (i = 0; i < region_size;
419 addr += bytes_per_line, i += bytes_per_line) {
420 unsigned char buf[bytes_per_line];
421 char line[100];
422 if (copy_from_user(buf, addr, bytes_per_line))
423 continue;
424 if (!found_readable_mem) {
425 pr_err("Dumping memory around address 0x%lx:\n",
426 (unsigned long)address);
427 found_readable_mem = 1;
428 }
429 j = sprintf(line, REGFMT":", (unsigned long)addr);
430 for (k = 0; k < bytes_per_line; ++k)
431 j += sprintf(&line[j], " %02x", buf[k]);
432 pr_err("%s\n", line);
433 }
434 if (!found_readable_mem)
435 pr_err("No readable memory around address 0x%lx\n",
436 (unsigned long)address);
437}
438
439void trace_unhandled_signal(const char *type, struct pt_regs *regs,
440 unsigned long address, int sig)
441{
442 struct task_struct *tsk = current;
443
444 if (show_unhandled_signals == 0)
445 return;
446
447 /* If the signal is handled, don't show it here. */
448 if (!is_global_init(tsk)) {
449 void __user *handler =
450 tsk->sighand->action[sig-1].sa.sa_handler;
451 if (handler != SIG_IGN && handler != SIG_DFL)
452 return;
453 }
454
455 /* Rate-limit the one-line output, not the detailed output. */
456 if (show_unhandled_signals <= 1 && !printk_ratelimit())
457 return;
458
459 printk("%s%s[%d]: %s at %lx pc "REGFMT" signal %d",
460 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
461 tsk->comm, task_pid_nr(tsk), type, address, regs->pc, sig);
462
463 print_vma_addr(KERN_CONT " in ", regs->pc);
464
465 printk(KERN_CONT "\n");
466
467 if (show_unhandled_signals > 1) {
468 switch (sig) {
469 case SIGILL:
470 case SIGFPE:
471 case SIGSEGV:
472 case SIGBUS:
473 pr_err("User crash: signal %d,"
474 " trap %ld, address 0x%lx\n",
475 sig, regs->faultnum, address);
476 show_regs(regs);
477 dump_mem((void __user *)address);
478 break;
479 default:
480 pr_err("User crash: signal %d, trap %ld\n",
481 sig, regs->faultnum);
482 break;
483 }
484 }
485}
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 84a729e06ec4..4032ca8e51b6 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -186,6 +186,8 @@ static tile_bundle_bits rewrite_load_store_unaligned(
186 .si_code = SEGV_MAPERR, 186 .si_code = SEGV_MAPERR,
187 .si_addr = addr 187 .si_addr = addr
188 }; 188 };
189 trace_unhandled_signal("segfault", regs,
190 (unsigned long)addr, SIGSEGV);
189 force_sig_info(info.si_signo, &info, current); 191 force_sig_info(info.si_signo, &info, current);
190 return (tile_bundle_bits) 0; 192 return (tile_bundle_bits) 0;
191 } 193 }
@@ -196,6 +198,8 @@ static tile_bundle_bits rewrite_load_store_unaligned(
196 .si_code = BUS_ADRALN, 198 .si_code = BUS_ADRALN,
197 .si_addr = addr 199 .si_addr = addr
198 }; 200 };
201 trace_unhandled_signal("unaligned trap", regs,
202 (unsigned long)addr, SIGBUS);
199 force_sig_info(info.si_signo, &info, current); 203 force_sig_info(info.si_signo, &info, current);
200 return (tile_bundle_bits) 0; 204 return (tile_bundle_bits) 0;
201 } 205 }
@@ -318,6 +322,14 @@ void single_step_once(struct pt_regs *regs)
318" .popsection\n" 322" .popsection\n"
319 ); 323 );
320 324
325 /*
326 * Enable interrupts here to allow touching userspace and the like.
327 * The callers expect this: do_trap() already has interrupts
328 * enabled, and do_work_pending() handles functions that enable
329 * interrupts internally.
330 */
331 local_irq_enable();
332
321 if (state == NULL) { 333 if (state == NULL) {
322 /* allocate a page of writable, executable memory */ 334 /* allocate a page of writable, executable memory */
323 state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL); 335 state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index dd81713a90dc..37ee4d037e0b 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -36,7 +36,7 @@
36#define KBT_LOOP 3 /* Backtrace entered a loop */ 36#define KBT_LOOP 3 /* Backtrace entered a loop */
37 37
38/* Is address on the specified kernel stack? */ 38/* Is address on the specified kernel stack? */
39static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp) 39static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
40{ 40{
41 ulong kstack_base = (ulong) kbt->task->stack; 41 ulong kstack_base = (ulong) kbt->task->stack;
42 if (kstack_base == 0) /* corrupt task pointer; just follow stack... */ 42 if (kstack_base == 0) /* corrupt task pointer; just follow stack... */
@@ -45,7 +45,7 @@ static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp)
45} 45}
46 46
47/* Is address valid for reading? */ 47/* Is address valid for reading? */
48static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address) 48static int valid_address(struct KBacktraceIterator *kbt, unsigned long address)
49{ 49{
50 HV_PTE *l1_pgtable = kbt->pgtable; 50 HV_PTE *l1_pgtable = kbt->pgtable;
51 HV_PTE *l2_pgtable; 51 HV_PTE *l2_pgtable;
@@ -97,7 +97,7 @@ static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
97} 97}
98 98
99/* Callback for backtracer; basically a glorified memcpy */ 99/* Callback for backtracer; basically a glorified memcpy */
100static bool read_memory_func(void *result, VirtualAddress address, 100static bool read_memory_func(void *result, unsigned long address,
101 unsigned int size, void *vkbt) 101 unsigned int size, void *vkbt)
102{ 102{
103 int retval; 103 int retval;
@@ -124,7 +124,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
124{ 124{
125 const char *fault = NULL; /* happy compiler */ 125 const char *fault = NULL; /* happy compiler */
126 char fault_buf[64]; 126 char fault_buf[64];
127 VirtualAddress sp = kbt->it.sp; 127 unsigned long sp = kbt->it.sp;
128 struct pt_regs *p; 128 struct pt_regs *p;
129 129
130 if (!in_kernel_stack(kbt, sp)) 130 if (!in_kernel_stack(kbt, sp))
@@ -163,7 +163,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
163} 163}
164 164
165/* Is the pc pointing to a sigreturn trampoline? */ 165/* Is the pc pointing to a sigreturn trampoline? */
166static int is_sigreturn(VirtualAddress pc) 166static int is_sigreturn(unsigned long pc)
167{ 167{
168 return (pc == VDSO_BASE); 168 return (pc == VDSO_BASE);
169} 169}
@@ -260,7 +260,7 @@ static void validate_stack(struct pt_regs *regs)
260void KBacktraceIterator_init(struct KBacktraceIterator *kbt, 260void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
261 struct task_struct *t, struct pt_regs *regs) 261 struct task_struct *t, struct pt_regs *regs)
262{ 262{
263 VirtualAddress pc, lr, sp, r52; 263 unsigned long pc, lr, sp, r52;
264 int is_current; 264 int is_current;
265 265
266 /* 266 /*
@@ -331,7 +331,7 @@ EXPORT_SYMBOL(KBacktraceIterator_end);
331 331
332void KBacktraceIterator_next(struct KBacktraceIterator *kbt) 332void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
333{ 333{
334 VirtualAddress old_pc = kbt->it.pc, old_sp = kbt->it.sp; 334 unsigned long old_pc = kbt->it.pc, old_sp = kbt->it.sp;
335 kbt->new_context = 0; 335 kbt->new_context = 0;
336 if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) { 336 if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
337 kbt->end = KBT_DONE; 337 kbt->end = KBT_DONE;
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index e2187d24a9b4..cb44ba7ccd2d 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -56,13 +56,6 @@ ssize_t sys32_readahead(int fd, u32 offset_lo, u32 offset_hi, u32 count)
56 return sys_readahead(fd, ((loff_t)offset_hi << 32) | offset_lo, count); 56 return sys_readahead(fd, ((loff_t)offset_hi << 32) | offset_lo, count);
57} 57}
58 58
59long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi,
60 u32 len, int advice)
61{
62 return sys_fadvise64_64(fd, ((loff_t)offset_hi << 32) | offset_lo,
63 len, advice);
64}
65
66int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi, 59int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
67 u32 len_lo, u32 len_hi, int advice) 60 u32 len_lo, u32 len_hi, int advice)
68{ 61{
@@ -103,10 +96,8 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
103 96
104#ifndef __tilegx__ 97#ifndef __tilegx__
105/* See comments at the top of the file. */ 98/* See comments at the top of the file. */
106#define sys_fadvise64 sys32_fadvise64
107#define sys_fadvise64_64 sys32_fadvise64_64 99#define sys_fadvise64_64 sys32_fadvise64_64
108#define sys_readahead sys32_readahead 100#define sys_readahead sys32_readahead
109#define sys_sync_file_range sys_sync_file_range2
110#endif 101#endif
111 102
112/* Call the trampolines to manage pt_regs where necessary. */ 103/* Call the trampolines to manage pt_regs where necessary. */
diff --git a/arch/tile/kernel/tile-desc_32.c b/arch/tile/kernel/tile-desc_32.c
index 69af0e150f78..7e31a1285788 100644
--- a/arch/tile/kernel/tile-desc_32.c
+++ b/arch/tile/kernel/tile-desc_32.c
@@ -2413,12 +2413,13 @@ const struct tile_operand tile_operands[43] =
2413 2413
2414 2414
2415 2415
2416/* Given a set of bundle bits and the lookup FSM for a specific pipe, 2416/* Given a set of bundle bits and a specific pipe, returns which
2417 * returns which instruction the bundle contains in that pipe. 2417 * instruction the bundle contains in that pipe.
2418 */ 2418 */
2419static const struct tile_opcode * 2419const struct tile_opcode *
2420find_opcode(tile_bundle_bits bits, const unsigned short *table) 2420find_opcode(tile_bundle_bits bits, tile_pipeline pipe)
2421{ 2421{
2422 const unsigned short *table = tile_bundle_decoder_fsms[pipe];
2422 int index = 0; 2423 int index = 0;
2423 2424
2424 while (1) 2425 while (1)
@@ -2465,7 +2466,7 @@ parse_insn_tile(tile_bundle_bits bits,
2465 int i; 2466 int i;
2466 2467
2467 d = &decoded[num_instructions++]; 2468 d = &decoded[num_instructions++];
2468 opc = find_opcode (bits, tile_bundle_decoder_fsms[pipe]); 2469 opc = find_opcode (bits, (tile_pipeline)pipe);
2469 d->opcode = opc; 2470 d->opcode = opc;
2470 2471
2471 /* Decode each operand, sign extending, etc. as appropriate. */ 2472 /* Decode each operand, sign extending, etc. as appropriate. */
diff --git a/arch/tile/kernel/tile-desc_64.c b/arch/tile/kernel/tile-desc_64.c
new file mode 100644
index 000000000000..d57007bed77f
--- /dev/null
+++ b/arch/tile/kernel/tile-desc_64.c
@@ -0,0 +1,2200 @@
1/* This define is BFD_RELOC_##x for real bfd, or -1 for everyone else. */
2#define BFD_RELOC(x) -1
3
4/* Special registers. */
5#define TREG_LR 55
6#define TREG_SN 56
7#define TREG_ZERO 63
8
9/* FIXME: Rename this. */
10#include <asm/opcode-tile_64.h>
11
12#include <linux/stddef.h>
13
14const struct tilegx_opcode tilegx_opcodes[334] =
15{
16 { "bpt", TILEGX_OPC_BPT, 0x2, 0, TREG_ZERO, 0,
17 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
18 },
19 { "info", TILEGX_OPC_INFO, 0xf, 1, TREG_ZERO, 1,
20 { { 0 }, { 1 }, { 2 }, { 3 }, { 0, } },
21 },
22 { "infol", TILEGX_OPC_INFOL, 0x3, 1, TREG_ZERO, 1,
23 { { 4 }, { 5 }, { 0, }, { 0, }, { 0, } },
24 },
25 { "move", TILEGX_OPC_MOVE, 0xf, 2, TREG_ZERO, 1,
26 { { 6, 7 }, { 8, 9 }, { 10, 11 }, { 12, 13 }, { 0, } },
27 },
28 { "movei", TILEGX_OPC_MOVEI, 0xf, 2, TREG_ZERO, 1,
29 { { 6, 0 }, { 8, 1 }, { 10, 2 }, { 12, 3 }, { 0, } },
30 },
31 { "moveli", TILEGX_OPC_MOVELI, 0x3, 2, TREG_ZERO, 1,
32 { { 6, 4 }, { 8, 5 }, { 0, }, { 0, }, { 0, } },
33 },
34 { "prefetch", TILEGX_OPC_PREFETCH, 0x12, 1, TREG_ZERO, 1,
35 { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
36 },
37 { "prefetch_add_l1", TILEGX_OPC_PREFETCH_ADD_L1, 0x2, 2, TREG_ZERO, 1,
38 { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
39 },
40 { "prefetch_add_l1_fault", TILEGX_OPC_PREFETCH_ADD_L1_FAULT, 0x2, 2, TREG_ZERO, 1,
41 { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
42 },
43 { "prefetch_add_l2", TILEGX_OPC_PREFETCH_ADD_L2, 0x2, 2, TREG_ZERO, 1,
44 { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
45 },
46 { "prefetch_add_l2_fault", TILEGX_OPC_PREFETCH_ADD_L2_FAULT, 0x2, 2, TREG_ZERO, 1,
47 { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
48 },
49 { "prefetch_add_l3", TILEGX_OPC_PREFETCH_ADD_L3, 0x2, 2, TREG_ZERO, 1,
50 { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
51 },
52 { "prefetch_add_l3_fault", TILEGX_OPC_PREFETCH_ADD_L3_FAULT, 0x2, 2, TREG_ZERO, 1,
53 { { 0, }, { 15, 1 }, { 0, }, { 0, }, { 0, } },
54 },
55 { "prefetch_l1", TILEGX_OPC_PREFETCH_L1, 0x12, 1, TREG_ZERO, 1,
56 { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
57 },
58 { "prefetch_l1_fault", TILEGX_OPC_PREFETCH_L1_FAULT, 0x12, 1, TREG_ZERO, 1,
59 { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
60 },
61 { "prefetch_l2", TILEGX_OPC_PREFETCH_L2, 0x12, 1, TREG_ZERO, 1,
62 { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
63 },
64 { "prefetch_l2_fault", TILEGX_OPC_PREFETCH_L2_FAULT, 0x12, 1, TREG_ZERO, 1,
65 { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
66 },
67 { "prefetch_l3", TILEGX_OPC_PREFETCH_L3, 0x12, 1, TREG_ZERO, 1,
68 { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
69 },
70 { "prefetch_l3_fault", TILEGX_OPC_PREFETCH_L3_FAULT, 0x12, 1, TREG_ZERO, 1,
71 { { 0, }, { 9 }, { 0, }, { 0, }, { 14 } },
72 },
73 { "raise", TILEGX_OPC_RAISE, 0x2, 0, TREG_ZERO, 1,
74 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
75 },
76 { "add", TILEGX_OPC_ADD, 0xf, 3, TREG_ZERO, 1,
77 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
78 },
79 { "addi", TILEGX_OPC_ADDI, 0xf, 3, TREG_ZERO, 1,
80 { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
81 },
82 { "addli", TILEGX_OPC_ADDLI, 0x3, 3, TREG_ZERO, 1,
83 { { 6, 7, 4 }, { 8, 9, 5 }, { 0, }, { 0, }, { 0, } },
84 },
85 { "addx", TILEGX_OPC_ADDX, 0xf, 3, TREG_ZERO, 1,
86 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
87 },
88 { "addxi", TILEGX_OPC_ADDXI, 0xf, 3, TREG_ZERO, 1,
89 { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
90 },
91 { "addxli", TILEGX_OPC_ADDXLI, 0x3, 3, TREG_ZERO, 1,
92 { { 6, 7, 4 }, { 8, 9, 5 }, { 0, }, { 0, }, { 0, } },
93 },
94 { "addxsc", TILEGX_OPC_ADDXSC, 0x3, 3, TREG_ZERO, 1,
95 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
96 },
97 { "and", TILEGX_OPC_AND, 0xf, 3, TREG_ZERO, 1,
98 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
99 },
100 { "andi", TILEGX_OPC_ANDI, 0xf, 3, TREG_ZERO, 1,
101 { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
102 },
103 { "beqz", TILEGX_OPC_BEQZ, 0x2, 2, TREG_ZERO, 1,
104 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
105 },
106 { "beqzt", TILEGX_OPC_BEQZT, 0x2, 2, TREG_ZERO, 1,
107 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
108 },
109 { "bfexts", TILEGX_OPC_BFEXTS, 0x1, 4, TREG_ZERO, 1,
110 { { 6, 7, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } },
111 },
112 { "bfextu", TILEGX_OPC_BFEXTU, 0x1, 4, TREG_ZERO, 1,
113 { { 6, 7, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } },
114 },
115 { "bfins", TILEGX_OPC_BFINS, 0x1, 4, TREG_ZERO, 1,
116 { { 23, 7, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } },
117 },
118 { "bgez", TILEGX_OPC_BGEZ, 0x2, 2, TREG_ZERO, 1,
119 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
120 },
121 { "bgezt", TILEGX_OPC_BGEZT, 0x2, 2, TREG_ZERO, 1,
122 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
123 },
124 { "bgtz", TILEGX_OPC_BGTZ, 0x2, 2, TREG_ZERO, 1,
125 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
126 },
127 { "bgtzt", TILEGX_OPC_BGTZT, 0x2, 2, TREG_ZERO, 1,
128 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
129 },
130 { "blbc", TILEGX_OPC_BLBC, 0x2, 2, TREG_ZERO, 1,
131 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
132 },
133 { "blbct", TILEGX_OPC_BLBCT, 0x2, 2, TREG_ZERO, 1,
134 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
135 },
136 { "blbs", TILEGX_OPC_BLBS, 0x2, 2, TREG_ZERO, 1,
137 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
138 },
139 { "blbst", TILEGX_OPC_BLBST, 0x2, 2, TREG_ZERO, 1,
140 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
141 },
142 { "blez", TILEGX_OPC_BLEZ, 0x2, 2, TREG_ZERO, 1,
143 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
144 },
145 { "blezt", TILEGX_OPC_BLEZT, 0x2, 2, TREG_ZERO, 1,
146 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
147 },
148 { "bltz", TILEGX_OPC_BLTZ, 0x2, 2, TREG_ZERO, 1,
149 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
150 },
151 { "bltzt", TILEGX_OPC_BLTZT, 0x2, 2, TREG_ZERO, 1,
152 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
153 },
154 { "bnez", TILEGX_OPC_BNEZ, 0x2, 2, TREG_ZERO, 1,
155 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
156 },
157 { "bnezt", TILEGX_OPC_BNEZT, 0x2, 2, TREG_ZERO, 1,
158 { { 0, }, { 9, 20 }, { 0, }, { 0, }, { 0, } },
159 },
160 { "clz", TILEGX_OPC_CLZ, 0x5, 2, TREG_ZERO, 1,
161 { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
162 },
163 { "cmoveqz", TILEGX_OPC_CMOVEQZ, 0x5, 3, TREG_ZERO, 1,
164 { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
165 },
166 { "cmovnez", TILEGX_OPC_CMOVNEZ, 0x5, 3, TREG_ZERO, 1,
167 { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
168 },
169 { "cmpeq", TILEGX_OPC_CMPEQ, 0xf, 3, TREG_ZERO, 1,
170 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
171 },
172 { "cmpeqi", TILEGX_OPC_CMPEQI, 0xf, 3, TREG_ZERO, 1,
173 { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
174 },
175 { "cmpexch", TILEGX_OPC_CMPEXCH, 0x2, 3, TREG_ZERO, 1,
176 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
177 },
178 { "cmpexch4", TILEGX_OPC_CMPEXCH4, 0x2, 3, TREG_ZERO, 1,
179 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
180 },
181 { "cmples", TILEGX_OPC_CMPLES, 0xf, 3, TREG_ZERO, 1,
182 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
183 },
184 { "cmpleu", TILEGX_OPC_CMPLEU, 0xf, 3, TREG_ZERO, 1,
185 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
186 },
187 { "cmplts", TILEGX_OPC_CMPLTS, 0xf, 3, TREG_ZERO, 1,
188 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
189 },
190 { "cmpltsi", TILEGX_OPC_CMPLTSI, 0xf, 3, TREG_ZERO, 1,
191 { { 6, 7, 0 }, { 8, 9, 1 }, { 10, 11, 2 }, { 12, 13, 3 }, { 0, } },
192 },
193 { "cmpltu", TILEGX_OPC_CMPLTU, 0xf, 3, TREG_ZERO, 1,
194 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
195 },
196 { "cmpltui", TILEGX_OPC_CMPLTUI, 0x3, 3, TREG_ZERO, 1,
197 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
198 },
199 { "cmpne", TILEGX_OPC_CMPNE, 0xf, 3, TREG_ZERO, 1,
200 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
201 },
202 { "cmul", TILEGX_OPC_CMUL, 0x1, 3, TREG_ZERO, 1,
203 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
204 },
205 { "cmula", TILEGX_OPC_CMULA, 0x1, 3, TREG_ZERO, 1,
206 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
207 },
208 { "cmulaf", TILEGX_OPC_CMULAF, 0x1, 3, TREG_ZERO, 1,
209 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
210 },
211 { "cmulf", TILEGX_OPC_CMULF, 0x1, 3, TREG_ZERO, 1,
212 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
213 },
214 { "cmulfr", TILEGX_OPC_CMULFR, 0x1, 3, TREG_ZERO, 1,
215 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
216 },
217 { "cmulh", TILEGX_OPC_CMULH, 0x1, 3, TREG_ZERO, 1,
218 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
219 },
220 { "cmulhr", TILEGX_OPC_CMULHR, 0x1, 3, TREG_ZERO, 1,
221 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
222 },
223 { "crc32_32", TILEGX_OPC_CRC32_32, 0x1, 3, TREG_ZERO, 1,
224 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
225 },
226 { "crc32_8", TILEGX_OPC_CRC32_8, 0x1, 3, TREG_ZERO, 1,
227 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
228 },
229 { "ctz", TILEGX_OPC_CTZ, 0x5, 2, TREG_ZERO, 1,
230 { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
231 },
232 { "dblalign", TILEGX_OPC_DBLALIGN, 0x1, 3, TREG_ZERO, 1,
233 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
234 },
235 { "dblalign2", TILEGX_OPC_DBLALIGN2, 0x3, 3, TREG_ZERO, 1,
236 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
237 },
238 { "dblalign4", TILEGX_OPC_DBLALIGN4, 0x3, 3, TREG_ZERO, 1,
239 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
240 },
241 { "dblalign6", TILEGX_OPC_DBLALIGN6, 0x3, 3, TREG_ZERO, 1,
242 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
243 },
244 { "drain", TILEGX_OPC_DRAIN, 0x2, 0, TREG_ZERO, 0,
245 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
246 },
247 { "dtlbpr", TILEGX_OPC_DTLBPR, 0x2, 1, TREG_ZERO, 1,
248 { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
249 },
250 { "exch", TILEGX_OPC_EXCH, 0x2, 3, TREG_ZERO, 1,
251 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
252 },
253 { "exch4", TILEGX_OPC_EXCH4, 0x2, 3, TREG_ZERO, 1,
254 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
255 },
256 { "fdouble_add_flags", TILEGX_OPC_FDOUBLE_ADD_FLAGS, 0x1, 3, TREG_ZERO, 1,
257 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
258 },
259 { "fdouble_addsub", TILEGX_OPC_FDOUBLE_ADDSUB, 0x1, 3, TREG_ZERO, 1,
260 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
261 },
262 { "fdouble_mul_flags", TILEGX_OPC_FDOUBLE_MUL_FLAGS, 0x1, 3, TREG_ZERO, 1,
263 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
264 },
265 { "fdouble_pack1", TILEGX_OPC_FDOUBLE_PACK1, 0x1, 3, TREG_ZERO, 1,
266 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
267 },
268 { "fdouble_pack2", TILEGX_OPC_FDOUBLE_PACK2, 0x1, 3, TREG_ZERO, 1,
269 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
270 },
271 { "fdouble_sub_flags", TILEGX_OPC_FDOUBLE_SUB_FLAGS, 0x1, 3, TREG_ZERO, 1,
272 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
273 },
274 { "fdouble_unpack_max", TILEGX_OPC_FDOUBLE_UNPACK_MAX, 0x1, 3, TREG_ZERO, 1,
275 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
276 },
277 { "fdouble_unpack_min", TILEGX_OPC_FDOUBLE_UNPACK_MIN, 0x1, 3, TREG_ZERO, 1,
278 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
279 },
280 { "fetchadd", TILEGX_OPC_FETCHADD, 0x2, 3, TREG_ZERO, 1,
281 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
282 },
283 { "fetchadd4", TILEGX_OPC_FETCHADD4, 0x2, 3, TREG_ZERO, 1,
284 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
285 },
286 { "fetchaddgez", TILEGX_OPC_FETCHADDGEZ, 0x2, 3, TREG_ZERO, 1,
287 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
288 },
289 { "fetchaddgez4", TILEGX_OPC_FETCHADDGEZ4, 0x2, 3, TREG_ZERO, 1,
290 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
291 },
292 { "fetchand", TILEGX_OPC_FETCHAND, 0x2, 3, TREG_ZERO, 1,
293 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
294 },
295 { "fetchand4", TILEGX_OPC_FETCHAND4, 0x2, 3, TREG_ZERO, 1,
296 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
297 },
298 { "fetchor", TILEGX_OPC_FETCHOR, 0x2, 3, TREG_ZERO, 1,
299 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
300 },
301 { "fetchor4", TILEGX_OPC_FETCHOR4, 0x2, 3, TREG_ZERO, 1,
302 { { 0, }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
303 },
304 { "finv", TILEGX_OPC_FINV, 0x2, 1, TREG_ZERO, 1,
305 { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
306 },
307 { "flush", TILEGX_OPC_FLUSH, 0x2, 1, TREG_ZERO, 1,
308 { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
309 },
310 { "flushwb", TILEGX_OPC_FLUSHWB, 0x2, 0, TREG_ZERO, 1,
311 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
312 },
313 { "fnop", TILEGX_OPC_FNOP, 0xf, 0, TREG_ZERO, 1,
314 { { }, { }, { }, { }, { 0, } },
315 },
316 { "fsingle_add1", TILEGX_OPC_FSINGLE_ADD1, 0x1, 3, TREG_ZERO, 1,
317 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
318 },
319 { "fsingle_addsub2", TILEGX_OPC_FSINGLE_ADDSUB2, 0x1, 3, TREG_ZERO, 1,
320 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
321 },
322 { "fsingle_mul1", TILEGX_OPC_FSINGLE_MUL1, 0x1, 3, TREG_ZERO, 1,
323 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
324 },
325 { "fsingle_mul2", TILEGX_OPC_FSINGLE_MUL2, 0x1, 3, TREG_ZERO, 1,
326 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
327 },
328 { "fsingle_pack1", TILEGX_OPC_FSINGLE_PACK1, 0x5, 2, TREG_ZERO, 1,
329 { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
330 },
331 { "fsingle_pack2", TILEGX_OPC_FSINGLE_PACK2, 0x1, 3, TREG_ZERO, 1,
332 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
333 },
334 { "fsingle_sub1", TILEGX_OPC_FSINGLE_SUB1, 0x1, 3, TREG_ZERO, 1,
335 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
336 },
337 { "icoh", TILEGX_OPC_ICOH, 0x2, 1, TREG_ZERO, 1,
338 { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
339 },
340 { "ill", TILEGX_OPC_ILL, 0xa, 0, TREG_ZERO, 1,
341 { { 0, }, { }, { 0, }, { }, { 0, } },
342 },
343 { "inv", TILEGX_OPC_INV, 0x2, 1, TREG_ZERO, 1,
344 { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
345 },
346 { "iret", TILEGX_OPC_IRET, 0x2, 0, TREG_ZERO, 1,
347 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
348 },
349 { "j", TILEGX_OPC_J, 0x2, 1, TREG_ZERO, 1,
350 { { 0, }, { 25 }, { 0, }, { 0, }, { 0, } },
351 },
352 { "jal", TILEGX_OPC_JAL, 0x2, 1, TREG_LR, 1,
353 { { 0, }, { 25 }, { 0, }, { 0, }, { 0, } },
354 },
355 { "jalr", TILEGX_OPC_JALR, 0xa, 1, TREG_LR, 1,
356 { { 0, }, { 9 }, { 0, }, { 13 }, { 0, } },
357 },
358 { "jalrp", TILEGX_OPC_JALRP, 0xa, 1, TREG_LR, 1,
359 { { 0, }, { 9 }, { 0, }, { 13 }, { 0, } },
360 },
361 { "jr", TILEGX_OPC_JR, 0xa, 1, TREG_ZERO, 1,
362 { { 0, }, { 9 }, { 0, }, { 13 }, { 0, } },
363 },
364 { "jrp", TILEGX_OPC_JRP, 0xa, 1, TREG_ZERO, 1,
365 { { 0, }, { 9 }, { 0, }, { 13 }, { 0, } },
366 },
367 { "ld", TILEGX_OPC_LD, 0x12, 2, TREG_ZERO, 1,
368 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
369 },
370 { "ld1s", TILEGX_OPC_LD1S, 0x12, 2, TREG_ZERO, 1,
371 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
372 },
373 { "ld1s_add", TILEGX_OPC_LD1S_ADD, 0x2, 3, TREG_ZERO, 1,
374 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
375 },
376 { "ld1u", TILEGX_OPC_LD1U, 0x12, 2, TREG_ZERO, 1,
377 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
378 },
379 { "ld1u_add", TILEGX_OPC_LD1U_ADD, 0x2, 3, TREG_ZERO, 1,
380 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
381 },
382 { "ld2s", TILEGX_OPC_LD2S, 0x12, 2, TREG_ZERO, 1,
383 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
384 },
385 { "ld2s_add", TILEGX_OPC_LD2S_ADD, 0x2, 3, TREG_ZERO, 1,
386 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
387 },
388 { "ld2u", TILEGX_OPC_LD2U, 0x12, 2, TREG_ZERO, 1,
389 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
390 },
391 { "ld2u_add", TILEGX_OPC_LD2U_ADD, 0x2, 3, TREG_ZERO, 1,
392 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
393 },
394 { "ld4s", TILEGX_OPC_LD4S, 0x12, 2, TREG_ZERO, 1,
395 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
396 },
397 { "ld4s_add", TILEGX_OPC_LD4S_ADD, 0x2, 3, TREG_ZERO, 1,
398 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
399 },
400 { "ld4u", TILEGX_OPC_LD4U, 0x12, 2, TREG_ZERO, 1,
401 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 26, 14 } },
402 },
403 { "ld4u_add", TILEGX_OPC_LD4U_ADD, 0x2, 3, TREG_ZERO, 1,
404 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
405 },
406 { "ld_add", TILEGX_OPC_LD_ADD, 0x2, 3, TREG_ZERO, 1,
407 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
408 },
409 { "ldna", TILEGX_OPC_LDNA, 0x2, 2, TREG_ZERO, 1,
410 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
411 },
412 { "ldna_add", TILEGX_OPC_LDNA_ADD, 0x2, 3, TREG_ZERO, 1,
413 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
414 },
415 { "ldnt", TILEGX_OPC_LDNT, 0x2, 2, TREG_ZERO, 1,
416 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
417 },
418 { "ldnt1s", TILEGX_OPC_LDNT1S, 0x2, 2, TREG_ZERO, 1,
419 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
420 },
421 { "ldnt1s_add", TILEGX_OPC_LDNT1S_ADD, 0x2, 3, TREG_ZERO, 1,
422 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
423 },
424 { "ldnt1u", TILEGX_OPC_LDNT1U, 0x2, 2, TREG_ZERO, 1,
425 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
426 },
427 { "ldnt1u_add", TILEGX_OPC_LDNT1U_ADD, 0x2, 3, TREG_ZERO, 1,
428 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
429 },
430 { "ldnt2s", TILEGX_OPC_LDNT2S, 0x2, 2, TREG_ZERO, 1,
431 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
432 },
433 { "ldnt2s_add", TILEGX_OPC_LDNT2S_ADD, 0x2, 3, TREG_ZERO, 1,
434 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
435 },
436 { "ldnt2u", TILEGX_OPC_LDNT2U, 0x2, 2, TREG_ZERO, 1,
437 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
438 },
439 { "ldnt2u_add", TILEGX_OPC_LDNT2U_ADD, 0x2, 3, TREG_ZERO, 1,
440 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
441 },
442 { "ldnt4s", TILEGX_OPC_LDNT4S, 0x2, 2, TREG_ZERO, 1,
443 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
444 },
445 { "ldnt4s_add", TILEGX_OPC_LDNT4S_ADD, 0x2, 3, TREG_ZERO, 1,
446 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
447 },
448 { "ldnt4u", TILEGX_OPC_LDNT4U, 0x2, 2, TREG_ZERO, 1,
449 { { 0, }, { 8, 9 }, { 0, }, { 0, }, { 0, } },
450 },
451 { "ldnt4u_add", TILEGX_OPC_LDNT4U_ADD, 0x2, 3, TREG_ZERO, 1,
452 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
453 },
454 { "ldnt_add", TILEGX_OPC_LDNT_ADD, 0x2, 3, TREG_ZERO, 1,
455 { { 0, }, { 8, 15, 1 }, { 0, }, { 0, }, { 0, } },
456 },
457 { "lnk", TILEGX_OPC_LNK, 0xa, 1, TREG_ZERO, 1,
458 { { 0, }, { 8 }, { 0, }, { 12 }, { 0, } },
459 },
460 { "mf", TILEGX_OPC_MF, 0x2, 0, TREG_ZERO, 1,
461 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
462 },
463 { "mfspr", TILEGX_OPC_MFSPR, 0x2, 2, TREG_ZERO, 1,
464 { { 0, }, { 8, 27 }, { 0, }, { 0, }, { 0, } },
465 },
466 { "mm", TILEGX_OPC_MM, 0x1, 4, TREG_ZERO, 1,
467 { { 23, 7, 21, 22 }, { 0, }, { 0, }, { 0, }, { 0, } },
468 },
469 { "mnz", TILEGX_OPC_MNZ, 0xf, 3, TREG_ZERO, 1,
470 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
471 },
472 { "mtspr", TILEGX_OPC_MTSPR, 0x2, 2, TREG_ZERO, 1,
473 { { 0, }, { 28, 9 }, { 0, }, { 0, }, { 0, } },
474 },
475 { "mul_hs_hs", TILEGX_OPC_MUL_HS_HS, 0x5, 3, TREG_ZERO, 1,
476 { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
477 },
478 { "mul_hs_hu", TILEGX_OPC_MUL_HS_HU, 0x1, 3, TREG_ZERO, 1,
479 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
480 },
481 { "mul_hs_ls", TILEGX_OPC_MUL_HS_LS, 0x1, 3, TREG_ZERO, 1,
482 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
483 },
484 { "mul_hs_lu", TILEGX_OPC_MUL_HS_LU, 0x1, 3, TREG_ZERO, 1,
485 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
486 },
487 { "mul_hu_hu", TILEGX_OPC_MUL_HU_HU, 0x5, 3, TREG_ZERO, 1,
488 { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
489 },
490 { "mul_hu_ls", TILEGX_OPC_MUL_HU_LS, 0x1, 3, TREG_ZERO, 1,
491 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
492 },
493 { "mul_hu_lu", TILEGX_OPC_MUL_HU_LU, 0x1, 3, TREG_ZERO, 1,
494 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
495 },
496 { "mul_ls_ls", TILEGX_OPC_MUL_LS_LS, 0x5, 3, TREG_ZERO, 1,
497 { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
498 },
499 { "mul_ls_lu", TILEGX_OPC_MUL_LS_LU, 0x1, 3, TREG_ZERO, 1,
500 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
501 },
502 { "mul_lu_lu", TILEGX_OPC_MUL_LU_LU, 0x5, 3, TREG_ZERO, 1,
503 { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
504 },
505 { "mula_hs_hs", TILEGX_OPC_MULA_HS_HS, 0x5, 3, TREG_ZERO, 1,
506 { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
507 },
508 { "mula_hs_hu", TILEGX_OPC_MULA_HS_HU, 0x1, 3, TREG_ZERO, 1,
509 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
510 },
511 { "mula_hs_ls", TILEGX_OPC_MULA_HS_LS, 0x1, 3, TREG_ZERO, 1,
512 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
513 },
514 { "mula_hs_lu", TILEGX_OPC_MULA_HS_LU, 0x1, 3, TREG_ZERO, 1,
515 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
516 },
517 { "mula_hu_hu", TILEGX_OPC_MULA_HU_HU, 0x5, 3, TREG_ZERO, 1,
518 { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
519 },
520 { "mula_hu_ls", TILEGX_OPC_MULA_HU_LS, 0x1, 3, TREG_ZERO, 1,
521 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
522 },
523 { "mula_hu_lu", TILEGX_OPC_MULA_HU_LU, 0x1, 3, TREG_ZERO, 1,
524 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
525 },
526 { "mula_ls_ls", TILEGX_OPC_MULA_LS_LS, 0x5, 3, TREG_ZERO, 1,
527 { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
528 },
529 { "mula_ls_lu", TILEGX_OPC_MULA_LS_LU, 0x1, 3, TREG_ZERO, 1,
530 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
531 },
532 { "mula_lu_lu", TILEGX_OPC_MULA_LU_LU, 0x5, 3, TREG_ZERO, 1,
533 { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
534 },
535 { "mulax", TILEGX_OPC_MULAX, 0x5, 3, TREG_ZERO, 1,
536 { { 23, 7, 16 }, { 0, }, { 24, 11, 18 }, { 0, }, { 0, } },
537 },
538 { "mulx", TILEGX_OPC_MULX, 0x5, 3, TREG_ZERO, 1,
539 { { 6, 7, 16 }, { 0, }, { 10, 11, 18 }, { 0, }, { 0, } },
540 },
541 { "mz", TILEGX_OPC_MZ, 0xf, 3, TREG_ZERO, 1,
542 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
543 },
544 { "nap", TILEGX_OPC_NAP, 0x2, 0, TREG_ZERO, 0,
545 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
546 },
547 { "nop", TILEGX_OPC_NOP, 0xf, 0, TREG_ZERO, 1,
548 { { }, { }, { }, { }, { 0, } },
549 },
550 { "nor", TILEGX_OPC_NOR, 0xf, 3, TREG_ZERO, 1,
551 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
552 },
553 { "or", TILEGX_OPC_OR, 0xf, 3, TREG_ZERO, 1,
554 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
555 },
556 { "ori", TILEGX_OPC_ORI, 0x3, 3, TREG_ZERO, 1,
557 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
558 },
559 { "pcnt", TILEGX_OPC_PCNT, 0x5, 2, TREG_ZERO, 1,
560 { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
561 },
562 { "revbits", TILEGX_OPC_REVBITS, 0x5, 2, TREG_ZERO, 1,
563 { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
564 },
565 { "revbytes", TILEGX_OPC_REVBYTES, 0x5, 2, TREG_ZERO, 1,
566 { { 6, 7 }, { 0, }, { 10, 11 }, { 0, }, { 0, } },
567 },
568 { "rotl", TILEGX_OPC_ROTL, 0xf, 3, TREG_ZERO, 1,
569 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
570 },
571 { "rotli", TILEGX_OPC_ROTLI, 0xf, 3, TREG_ZERO, 1,
572 { { 6, 7, 29 }, { 8, 9, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } },
573 },
574 { "shl", TILEGX_OPC_SHL, 0xf, 3, TREG_ZERO, 1,
575 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
576 },
577 { "shl16insli", TILEGX_OPC_SHL16INSLI, 0x3, 3, TREG_ZERO, 1,
578 { { 6, 7, 4 }, { 8, 9, 5 }, { 0, }, { 0, }, { 0, } },
579 },
580 { "shl1add", TILEGX_OPC_SHL1ADD, 0xf, 3, TREG_ZERO, 1,
581 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
582 },
583 { "shl1addx", TILEGX_OPC_SHL1ADDX, 0xf, 3, TREG_ZERO, 1,
584 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
585 },
586 { "shl2add", TILEGX_OPC_SHL2ADD, 0xf, 3, TREG_ZERO, 1,
587 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
588 },
589 { "shl2addx", TILEGX_OPC_SHL2ADDX, 0xf, 3, TREG_ZERO, 1,
590 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
591 },
592 { "shl3add", TILEGX_OPC_SHL3ADD, 0xf, 3, TREG_ZERO, 1,
593 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
594 },
595 { "shl3addx", TILEGX_OPC_SHL3ADDX, 0xf, 3, TREG_ZERO, 1,
596 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
597 },
598 { "shli", TILEGX_OPC_SHLI, 0xf, 3, TREG_ZERO, 1,
599 { { 6, 7, 29 }, { 8, 9, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } },
600 },
601 { "shlx", TILEGX_OPC_SHLX, 0x3, 3, TREG_ZERO, 1,
602 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
603 },
604 { "shlxi", TILEGX_OPC_SHLXI, 0x3, 3, TREG_ZERO, 1,
605 { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
606 },
607 { "shrs", TILEGX_OPC_SHRS, 0xf, 3, TREG_ZERO, 1,
608 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
609 },
610 { "shrsi", TILEGX_OPC_SHRSI, 0xf, 3, TREG_ZERO, 1,
611 { { 6, 7, 29 }, { 8, 9, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } },
612 },
613 { "shru", TILEGX_OPC_SHRU, 0xf, 3, TREG_ZERO, 1,
614 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
615 },
616 { "shrui", TILEGX_OPC_SHRUI, 0xf, 3, TREG_ZERO, 1,
617 { { 6, 7, 29 }, { 8, 9, 30 }, { 10, 11, 31 }, { 12, 13, 32 }, { 0, } },
618 },
619 { "shrux", TILEGX_OPC_SHRUX, 0x3, 3, TREG_ZERO, 1,
620 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
621 },
622 { "shruxi", TILEGX_OPC_SHRUXI, 0x3, 3, TREG_ZERO, 1,
623 { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
624 },
625 { "shufflebytes", TILEGX_OPC_SHUFFLEBYTES, 0x1, 3, TREG_ZERO, 1,
626 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
627 },
628 { "st", TILEGX_OPC_ST, 0x12, 2, TREG_ZERO, 1,
629 { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 14, 33 } },
630 },
631 { "st1", TILEGX_OPC_ST1, 0x12, 2, TREG_ZERO, 1,
632 { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 14, 33 } },
633 },
634 { "st1_add", TILEGX_OPC_ST1_ADD, 0x2, 3, TREG_ZERO, 1,
635 { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
636 },
637 { "st2", TILEGX_OPC_ST2, 0x12, 2, TREG_ZERO, 1,
638 { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 14, 33 } },
639 },
640 { "st2_add", TILEGX_OPC_ST2_ADD, 0x2, 3, TREG_ZERO, 1,
641 { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
642 },
643 { "st4", TILEGX_OPC_ST4, 0x12, 2, TREG_ZERO, 1,
644 { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 14, 33 } },
645 },
646 { "st4_add", TILEGX_OPC_ST4_ADD, 0x2, 3, TREG_ZERO, 1,
647 { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
648 },
649 { "st_add", TILEGX_OPC_ST_ADD, 0x2, 3, TREG_ZERO, 1,
650 { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
651 },
652 { "stnt", TILEGX_OPC_STNT, 0x2, 2, TREG_ZERO, 1,
653 { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 0, } },
654 },
655 { "stnt1", TILEGX_OPC_STNT1, 0x2, 2, TREG_ZERO, 1,
656 { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 0, } },
657 },
658 { "stnt1_add", TILEGX_OPC_STNT1_ADD, 0x2, 3, TREG_ZERO, 1,
659 { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
660 },
661 { "stnt2", TILEGX_OPC_STNT2, 0x2, 2, TREG_ZERO, 1,
662 { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 0, } },
663 },
664 { "stnt2_add", TILEGX_OPC_STNT2_ADD, 0x2, 3, TREG_ZERO, 1,
665 { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
666 },
667 { "stnt4", TILEGX_OPC_STNT4, 0x2, 2, TREG_ZERO, 1,
668 { { 0, }, { 9, 17 }, { 0, }, { 0, }, { 0, } },
669 },
670 { "stnt4_add", TILEGX_OPC_STNT4_ADD, 0x2, 3, TREG_ZERO, 1,
671 { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
672 },
673 { "stnt_add", TILEGX_OPC_STNT_ADD, 0x2, 3, TREG_ZERO, 1,
674 { { 0, }, { 15, 17, 34 }, { 0, }, { 0, }, { 0, } },
675 },
676 { "sub", TILEGX_OPC_SUB, 0xf, 3, TREG_ZERO, 1,
677 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
678 },
679 { "subx", TILEGX_OPC_SUBX, 0xf, 3, TREG_ZERO, 1,
680 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
681 },
682 { "subxsc", TILEGX_OPC_SUBXSC, 0x3, 3, TREG_ZERO, 1,
683 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
684 },
685 { "swint0", TILEGX_OPC_SWINT0, 0x2, 0, TREG_ZERO, 0,
686 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
687 },
688 { "swint1", TILEGX_OPC_SWINT1, 0x2, 0, TREG_ZERO, 0,
689 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
690 },
691 { "swint2", TILEGX_OPC_SWINT2, 0x2, 0, TREG_ZERO, 0,
692 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
693 },
694 { "swint3", TILEGX_OPC_SWINT3, 0x2, 0, TREG_ZERO, 0,
695 { { 0, }, { }, { 0, }, { 0, }, { 0, } },
696 },
697 { "tblidxb0", TILEGX_OPC_TBLIDXB0, 0x5, 2, TREG_ZERO, 1,
698 { { 23, 7 }, { 0, }, { 24, 11 }, { 0, }, { 0, } },
699 },
700 { "tblidxb1", TILEGX_OPC_TBLIDXB1, 0x5, 2, TREG_ZERO, 1,
701 { { 23, 7 }, { 0, }, { 24, 11 }, { 0, }, { 0, } },
702 },
703 { "tblidxb2", TILEGX_OPC_TBLIDXB2, 0x5, 2, TREG_ZERO, 1,
704 { { 23, 7 }, { 0, }, { 24, 11 }, { 0, }, { 0, } },
705 },
706 { "tblidxb3", TILEGX_OPC_TBLIDXB3, 0x5, 2, TREG_ZERO, 1,
707 { { 23, 7 }, { 0, }, { 24, 11 }, { 0, }, { 0, } },
708 },
709 { "v1add", TILEGX_OPC_V1ADD, 0x3, 3, TREG_ZERO, 1,
710 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
711 },
712 { "v1addi", TILEGX_OPC_V1ADDI, 0x3, 3, TREG_ZERO, 1,
713 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
714 },
715 { "v1adduc", TILEGX_OPC_V1ADDUC, 0x3, 3, TREG_ZERO, 1,
716 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
717 },
718 { "v1adiffu", TILEGX_OPC_V1ADIFFU, 0x1, 3, TREG_ZERO, 1,
719 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
720 },
721 { "v1avgu", TILEGX_OPC_V1AVGU, 0x1, 3, TREG_ZERO, 1,
722 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
723 },
724 { "v1cmpeq", TILEGX_OPC_V1CMPEQ, 0x3, 3, TREG_ZERO, 1,
725 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
726 },
727 { "v1cmpeqi", TILEGX_OPC_V1CMPEQI, 0x3, 3, TREG_ZERO, 1,
728 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
729 },
730 { "v1cmples", TILEGX_OPC_V1CMPLES, 0x3, 3, TREG_ZERO, 1,
731 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
732 },
733 { "v1cmpleu", TILEGX_OPC_V1CMPLEU, 0x3, 3, TREG_ZERO, 1,
734 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
735 },
736 { "v1cmplts", TILEGX_OPC_V1CMPLTS, 0x3, 3, TREG_ZERO, 1,
737 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
738 },
739 { "v1cmpltsi", TILEGX_OPC_V1CMPLTSI, 0x3, 3, TREG_ZERO, 1,
740 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
741 },
742 { "v1cmpltu", TILEGX_OPC_V1CMPLTU, 0x3, 3, TREG_ZERO, 1,
743 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
744 },
745 { "v1cmpltui", TILEGX_OPC_V1CMPLTUI, 0x3, 3, TREG_ZERO, 1,
746 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
747 },
748 { "v1cmpne", TILEGX_OPC_V1CMPNE, 0x3, 3, TREG_ZERO, 1,
749 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
750 },
751 { "v1ddotpu", TILEGX_OPC_V1DDOTPU, 0x1, 3, TREG_ZERO, 1,
752 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
753 },
754 { "v1ddotpua", TILEGX_OPC_V1DDOTPUA, 0x1, 3, TREG_ZERO, 1,
755 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
756 },
757 { "v1ddotpus", TILEGX_OPC_V1DDOTPUS, 0x1, 3, TREG_ZERO, 1,
758 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
759 },
760 { "v1ddotpusa", TILEGX_OPC_V1DDOTPUSA, 0x1, 3, TREG_ZERO, 1,
761 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
762 },
763 { "v1dotp", TILEGX_OPC_V1DOTP, 0x1, 3, TREG_ZERO, 1,
764 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
765 },
766 { "v1dotpa", TILEGX_OPC_V1DOTPA, 0x1, 3, TREG_ZERO, 1,
767 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
768 },
769 { "v1dotpu", TILEGX_OPC_V1DOTPU, 0x1, 3, TREG_ZERO, 1,
770 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
771 },
772 { "v1dotpua", TILEGX_OPC_V1DOTPUA, 0x1, 3, TREG_ZERO, 1,
773 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
774 },
775 { "v1dotpus", TILEGX_OPC_V1DOTPUS, 0x1, 3, TREG_ZERO, 1,
776 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
777 },
778 { "v1dotpusa", TILEGX_OPC_V1DOTPUSA, 0x1, 3, TREG_ZERO, 1,
779 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
780 },
781 { "v1int_h", TILEGX_OPC_V1INT_H, 0x3, 3, TREG_ZERO, 1,
782 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
783 },
784 { "v1int_l", TILEGX_OPC_V1INT_L, 0x3, 3, TREG_ZERO, 1,
785 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
786 },
787 { "v1maxu", TILEGX_OPC_V1MAXU, 0x3, 3, TREG_ZERO, 1,
788 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
789 },
790 { "v1maxui", TILEGX_OPC_V1MAXUI, 0x3, 3, TREG_ZERO, 1,
791 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
792 },
793 { "v1minu", TILEGX_OPC_V1MINU, 0x3, 3, TREG_ZERO, 1,
794 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
795 },
796 { "v1minui", TILEGX_OPC_V1MINUI, 0x3, 3, TREG_ZERO, 1,
797 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
798 },
799 { "v1mnz", TILEGX_OPC_V1MNZ, 0x3, 3, TREG_ZERO, 1,
800 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
801 },
802 { "v1multu", TILEGX_OPC_V1MULTU, 0x1, 3, TREG_ZERO, 1,
803 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
804 },
805 { "v1mulu", TILEGX_OPC_V1MULU, 0x1, 3, TREG_ZERO, 1,
806 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
807 },
808 { "v1mulus", TILEGX_OPC_V1MULUS, 0x1, 3, TREG_ZERO, 1,
809 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
810 },
811 { "v1mz", TILEGX_OPC_V1MZ, 0x3, 3, TREG_ZERO, 1,
812 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
813 },
814 { "v1sadau", TILEGX_OPC_V1SADAU, 0x1, 3, TREG_ZERO, 1,
815 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
816 },
817 { "v1sadu", TILEGX_OPC_V1SADU, 0x1, 3, TREG_ZERO, 1,
818 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
819 },
820 { "v1shl", TILEGX_OPC_V1SHL, 0x3, 3, TREG_ZERO, 1,
821 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
822 },
823 { "v1shli", TILEGX_OPC_V1SHLI, 0x3, 3, TREG_ZERO, 1,
824 { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
825 },
826 { "v1shrs", TILEGX_OPC_V1SHRS, 0x3, 3, TREG_ZERO, 1,
827 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
828 },
829 { "v1shrsi", TILEGX_OPC_V1SHRSI, 0x3, 3, TREG_ZERO, 1,
830 { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
831 },
832 { "v1shru", TILEGX_OPC_V1SHRU, 0x3, 3, TREG_ZERO, 1,
833 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
834 },
835 { "v1shrui", TILEGX_OPC_V1SHRUI, 0x3, 3, TREG_ZERO, 1,
836 { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
837 },
838 { "v1sub", TILEGX_OPC_V1SUB, 0x3, 3, TREG_ZERO, 1,
839 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
840 },
841 { "v1subuc", TILEGX_OPC_V1SUBUC, 0x3, 3, TREG_ZERO, 1,
842 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
843 },
844 { "v2add", TILEGX_OPC_V2ADD, 0x3, 3, TREG_ZERO, 1,
845 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
846 },
847 { "v2addi", TILEGX_OPC_V2ADDI, 0x3, 3, TREG_ZERO, 1,
848 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
849 },
850 { "v2addsc", TILEGX_OPC_V2ADDSC, 0x3, 3, TREG_ZERO, 1,
851 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
852 },
853 { "v2adiffs", TILEGX_OPC_V2ADIFFS, 0x1, 3, TREG_ZERO, 1,
854 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
855 },
856 { "v2avgs", TILEGX_OPC_V2AVGS, 0x1, 3, TREG_ZERO, 1,
857 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
858 },
859 { "v2cmpeq", TILEGX_OPC_V2CMPEQ, 0x3, 3, TREG_ZERO, 1,
860 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
861 },
862 { "v2cmpeqi", TILEGX_OPC_V2CMPEQI, 0x3, 3, TREG_ZERO, 1,
863 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
864 },
865 { "v2cmples", TILEGX_OPC_V2CMPLES, 0x3, 3, TREG_ZERO, 1,
866 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
867 },
868 { "v2cmpleu", TILEGX_OPC_V2CMPLEU, 0x3, 3, TREG_ZERO, 1,
869 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
870 },
871 { "v2cmplts", TILEGX_OPC_V2CMPLTS, 0x3, 3, TREG_ZERO, 1,
872 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
873 },
874 { "v2cmpltsi", TILEGX_OPC_V2CMPLTSI, 0x3, 3, TREG_ZERO, 1,
875 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
876 },
877 { "v2cmpltu", TILEGX_OPC_V2CMPLTU, 0x3, 3, TREG_ZERO, 1,
878 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
879 },
880 { "v2cmpltui", TILEGX_OPC_V2CMPLTUI, 0x3, 3, TREG_ZERO, 1,
881 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
882 },
883 { "v2cmpne", TILEGX_OPC_V2CMPNE, 0x3, 3, TREG_ZERO, 1,
884 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
885 },
886 { "v2dotp", TILEGX_OPC_V2DOTP, 0x1, 3, TREG_ZERO, 1,
887 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
888 },
889 { "v2dotpa", TILEGX_OPC_V2DOTPA, 0x1, 3, TREG_ZERO, 1,
890 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
891 },
892 { "v2int_h", TILEGX_OPC_V2INT_H, 0x3, 3, TREG_ZERO, 1,
893 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
894 },
895 { "v2int_l", TILEGX_OPC_V2INT_L, 0x3, 3, TREG_ZERO, 1,
896 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
897 },
898 { "v2maxs", TILEGX_OPC_V2MAXS, 0x3, 3, TREG_ZERO, 1,
899 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
900 },
901 { "v2maxsi", TILEGX_OPC_V2MAXSI, 0x3, 3, TREG_ZERO, 1,
902 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
903 },
904 { "v2mins", TILEGX_OPC_V2MINS, 0x3, 3, TREG_ZERO, 1,
905 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
906 },
907 { "v2minsi", TILEGX_OPC_V2MINSI, 0x3, 3, TREG_ZERO, 1,
908 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
909 },
910 { "v2mnz", TILEGX_OPC_V2MNZ, 0x3, 3, TREG_ZERO, 1,
911 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
912 },
913 { "v2mulfsc", TILEGX_OPC_V2MULFSC, 0x1, 3, TREG_ZERO, 1,
914 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
915 },
916 { "v2muls", TILEGX_OPC_V2MULS, 0x1, 3, TREG_ZERO, 1,
917 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
918 },
919 { "v2mults", TILEGX_OPC_V2MULTS, 0x1, 3, TREG_ZERO, 1,
920 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
921 },
922 { "v2mz", TILEGX_OPC_V2MZ, 0x3, 3, TREG_ZERO, 1,
923 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
924 },
925 { "v2packh", TILEGX_OPC_V2PACKH, 0x3, 3, TREG_ZERO, 1,
926 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
927 },
928 { "v2packl", TILEGX_OPC_V2PACKL, 0x3, 3, TREG_ZERO, 1,
929 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
930 },
931 { "v2packuc", TILEGX_OPC_V2PACKUC, 0x3, 3, TREG_ZERO, 1,
932 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
933 },
934 { "v2sadas", TILEGX_OPC_V2SADAS, 0x1, 3, TREG_ZERO, 1,
935 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
936 },
937 { "v2sadau", TILEGX_OPC_V2SADAU, 0x1, 3, TREG_ZERO, 1,
938 { { 23, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
939 },
940 { "v2sads", TILEGX_OPC_V2SADS, 0x1, 3, TREG_ZERO, 1,
941 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
942 },
943 { "v2sadu", TILEGX_OPC_V2SADU, 0x1, 3, TREG_ZERO, 1,
944 { { 6, 7, 16 }, { 0, }, { 0, }, { 0, }, { 0, } },
945 },
946 { "v2shl", TILEGX_OPC_V2SHL, 0x3, 3, TREG_ZERO, 1,
947 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
948 },
949 { "v2shli", TILEGX_OPC_V2SHLI, 0x3, 3, TREG_ZERO, 1,
950 { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
951 },
952 { "v2shlsc", TILEGX_OPC_V2SHLSC, 0x3, 3, TREG_ZERO, 1,
953 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
954 },
955 { "v2shrs", TILEGX_OPC_V2SHRS, 0x3, 3, TREG_ZERO, 1,
956 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
957 },
958 { "v2shrsi", TILEGX_OPC_V2SHRSI, 0x3, 3, TREG_ZERO, 1,
959 { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
960 },
961 { "v2shru", TILEGX_OPC_V2SHRU, 0x3, 3, TREG_ZERO, 1,
962 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
963 },
964 { "v2shrui", TILEGX_OPC_V2SHRUI, 0x3, 3, TREG_ZERO, 1,
965 { { 6, 7, 29 }, { 8, 9, 30 }, { 0, }, { 0, }, { 0, } },
966 },
967 { "v2sub", TILEGX_OPC_V2SUB, 0x3, 3, TREG_ZERO, 1,
968 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
969 },
970 { "v2subsc", TILEGX_OPC_V2SUBSC, 0x3, 3, TREG_ZERO, 1,
971 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
972 },
973 { "v4add", TILEGX_OPC_V4ADD, 0x3, 3, TREG_ZERO, 1,
974 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
975 },
976 { "v4addsc", TILEGX_OPC_V4ADDSC, 0x3, 3, TREG_ZERO, 1,
977 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
978 },
979 { "v4int_h", TILEGX_OPC_V4INT_H, 0x3, 3, TREG_ZERO, 1,
980 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
981 },
982 { "v4int_l", TILEGX_OPC_V4INT_L, 0x3, 3, TREG_ZERO, 1,
983 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
984 },
985 { "v4packsc", TILEGX_OPC_V4PACKSC, 0x3, 3, TREG_ZERO, 1,
986 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
987 },
988 { "v4shl", TILEGX_OPC_V4SHL, 0x3, 3, TREG_ZERO, 1,
989 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
990 },
991 { "v4shlsc", TILEGX_OPC_V4SHLSC, 0x3, 3, TREG_ZERO, 1,
992 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
993 },
994 { "v4shrs", TILEGX_OPC_V4SHRS, 0x3, 3, TREG_ZERO, 1,
995 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
996 },
997 { "v4shru", TILEGX_OPC_V4SHRU, 0x3, 3, TREG_ZERO, 1,
998 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
999 },
1000 { "v4sub", TILEGX_OPC_V4SUB, 0x3, 3, TREG_ZERO, 1,
1001 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
1002 },
1003 { "v4subsc", TILEGX_OPC_V4SUBSC, 0x3, 3, TREG_ZERO, 1,
1004 { { 6, 7, 16 }, { 8, 9, 17 }, { 0, }, { 0, }, { 0, } },
1005 },
1006 { "wh64", TILEGX_OPC_WH64, 0x2, 1, TREG_ZERO, 1,
1007 { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } },
1008 },
1009 { "xor", TILEGX_OPC_XOR, 0xf, 3, TREG_ZERO, 1,
1010 { { 6, 7, 16 }, { 8, 9, 17 }, { 10, 11, 18 }, { 12, 13, 19 }, { 0, } },
1011 },
1012 { "xori", TILEGX_OPC_XORI, 0x3, 3, TREG_ZERO, 1,
1013 { { 6, 7, 0 }, { 8, 9, 1 }, { 0, }, { 0, }, { 0, } },
1014 },
1015 { NULL, TILEGX_OPC_NONE, 0, 0, TREG_ZERO, 0, { { 0, } },
1016 }
1017};
/*
 * Helpers used to build the instruction-decode FSM tables that follow
 * (e.g. decode_X0_fsm[]).  Each table entry is an unsigned short that is
 * either a TILEGX_OPC_* opcode, a BITFIELD() descriptor, or a CHILD()
 * link to a sub-table.
 *
 * BITFIELD(start, size) packs a field descriptor into one entry: the
 * low 6 bits carry (start), and bits 6 and up carry a run of (size)
 * one-bits ((1 << size) - 1).  NOTE(review): the decode loop consuming
 * this encoding is outside this chunk — presumably it recovers the
 * field as (entry & 63) for the start bit and (entry >> 6) for the
 * width mask; confirm against the decoder before relying on this.
 *
 * CHILD(array_index) biases a table offset by TILEGX_OPC_NONE so that
 * an entry can be distinguished from a plain opcode value; entries
 * above the opcode range appear to index a child decode table within
 * the same array (see e.g. CHILD(513) at "index 513" below) — TODO
 * confirm against the decode routine.
 */
#define BITFIELD(start, size) ((start) | (((1 << (size)) - 1) << 6))
#define CHILD(array_index) (TILEGX_OPC_NONE + (array_index))
1020
1021static const unsigned short decode_X0_fsm[936] =
1022{
1023 BITFIELD(22, 9) /* index 0 */,
1024 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1025 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1026 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1027 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1028 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1029 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1030 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1031 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1032 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1033 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1034 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1035 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1036 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1037 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1038 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1039 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1040 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1041 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1042 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1043 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1044 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1045 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1046 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1047 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1048 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1049 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1050 CHILD(513), CHILD(513), CHILD(513), CHILD(513), TILEGX_OPC_ADDXLI,
1051 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1052 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1053 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1054 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1055 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1056 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1057 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1058 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1059 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1060 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1061 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1062 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1063 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1064 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1065 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1066 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_NONE,
1067 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1068 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1069 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1070 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_BFEXTS,
1071 TILEGX_OPC_BFEXTS, TILEGX_OPC_BFEXTS, TILEGX_OPC_BFEXTS, TILEGX_OPC_BFEXTU,
1072 TILEGX_OPC_BFEXTU, TILEGX_OPC_BFEXTU, TILEGX_OPC_BFEXTU, TILEGX_OPC_BFINS,
1073 TILEGX_OPC_BFINS, TILEGX_OPC_BFINS, TILEGX_OPC_BFINS, TILEGX_OPC_MM,
1074 TILEGX_OPC_MM, TILEGX_OPC_MM, TILEGX_OPC_MM, TILEGX_OPC_NONE,
1075 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1076 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1077 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1078 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1079 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1080 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1081 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1082 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, CHILD(528), CHILD(578),
1083 CHILD(583), CHILD(588), CHILD(593), CHILD(598), TILEGX_OPC_NONE,
1084 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1085 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1086 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1087 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1088 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1089 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1090 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1091 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1092 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1093 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1094 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1095 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1096 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1097 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1098 TILEGX_OPC_NONE, CHILD(603), CHILD(620), CHILD(637), CHILD(654), CHILD(671),
1099 CHILD(703), CHILD(797), CHILD(814), CHILD(831), CHILD(848), CHILD(865),
1100 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1101 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1102 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1103 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1104 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1105 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1106 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1107 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1108 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1109 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1110 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1111 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1112 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1113 TILEGX_OPC_NONE, CHILD(889), TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1114 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1115 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1116 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1117 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1118 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1119 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1120 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1121 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1122 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1123 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1124 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1125 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1126 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1127 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1128 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1129 TILEGX_OPC_NONE, CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1130 CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1131 CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1132 CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1133 CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1134 CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1135 CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1136 CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1137 CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1138 CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1139 CHILD(906), CHILD(906), CHILD(906), CHILD(906), CHILD(906),
1140 BITFIELD(6, 2) /* index 513 */,
1141 TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(518),
1142 BITFIELD(8, 2) /* index 518 */,
1143 TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(523),
1144 BITFIELD(10, 2) /* index 523 */,
1145 TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_MOVELI,
1146 BITFIELD(20, 2) /* index 528 */,
1147 TILEGX_OPC_NONE, CHILD(533), TILEGX_OPC_ADDXI, CHILD(548),
1148 BITFIELD(6, 2) /* index 533 */,
1149 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(538),
1150 BITFIELD(8, 2) /* index 538 */,
1151 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(543),
1152 BITFIELD(10, 2) /* index 543 */,
1153 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI,
1154 BITFIELD(0, 2) /* index 548 */,
1155 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(553),
1156 BITFIELD(2, 2) /* index 553 */,
1157 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(558),
1158 BITFIELD(4, 2) /* index 558 */,
1159 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(563),
1160 BITFIELD(6, 2) /* index 563 */,
1161 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(568),
1162 BITFIELD(8, 2) /* index 568 */,
1163 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(573),
1164 BITFIELD(10, 2) /* index 573 */,
1165 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO,
1166 BITFIELD(20, 2) /* index 578 */,
1167 TILEGX_OPC_CMPEQI, TILEGX_OPC_CMPLTSI, TILEGX_OPC_CMPLTUI, TILEGX_OPC_ORI,
1168 BITFIELD(20, 2) /* index 583 */,
1169 TILEGX_OPC_V1ADDI, TILEGX_OPC_V1CMPEQI, TILEGX_OPC_V1CMPLTSI,
1170 TILEGX_OPC_V1CMPLTUI,
1171 BITFIELD(20, 2) /* index 588 */,
1172 TILEGX_OPC_V1MAXUI, TILEGX_OPC_V1MINUI, TILEGX_OPC_V2ADDI,
1173 TILEGX_OPC_V2CMPEQI,
1174 BITFIELD(20, 2) /* index 593 */,
1175 TILEGX_OPC_V2CMPLTSI, TILEGX_OPC_V2CMPLTUI, TILEGX_OPC_V2MAXSI,
1176 TILEGX_OPC_V2MINSI,
1177 BITFIELD(20, 2) /* index 598 */,
1178 TILEGX_OPC_XORI, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1179 BITFIELD(18, 4) /* index 603 */,
1180 TILEGX_OPC_NONE, TILEGX_OPC_ADDXSC, TILEGX_OPC_ADDX, TILEGX_OPC_ADD,
1181 TILEGX_OPC_AND, TILEGX_OPC_CMOVEQZ, TILEGX_OPC_CMOVNEZ, TILEGX_OPC_CMPEQ,
1182 TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU,
1183 TILEGX_OPC_CMPNE, TILEGX_OPC_CMULAF, TILEGX_OPC_CMULA, TILEGX_OPC_CMULFR,
1184 BITFIELD(18, 4) /* index 620 */,
1185 TILEGX_OPC_CMULF, TILEGX_OPC_CMULHR, TILEGX_OPC_CMULH, TILEGX_OPC_CMUL,
1186 TILEGX_OPC_CRC32_32, TILEGX_OPC_CRC32_8, TILEGX_OPC_DBLALIGN2,
1187 TILEGX_OPC_DBLALIGN4, TILEGX_OPC_DBLALIGN6, TILEGX_OPC_DBLALIGN,
1188 TILEGX_OPC_FDOUBLE_ADDSUB, TILEGX_OPC_FDOUBLE_ADD_FLAGS,
1189 TILEGX_OPC_FDOUBLE_MUL_FLAGS, TILEGX_OPC_FDOUBLE_PACK1,
1190 TILEGX_OPC_FDOUBLE_PACK2, TILEGX_OPC_FDOUBLE_SUB_FLAGS,
1191 BITFIELD(18, 4) /* index 637 */,
1192 TILEGX_OPC_FDOUBLE_UNPACK_MAX, TILEGX_OPC_FDOUBLE_UNPACK_MIN,
1193 TILEGX_OPC_FSINGLE_ADD1, TILEGX_OPC_FSINGLE_ADDSUB2,
1194 TILEGX_OPC_FSINGLE_MUL1, TILEGX_OPC_FSINGLE_MUL2, TILEGX_OPC_FSINGLE_PACK2,
1195 TILEGX_OPC_FSINGLE_SUB1, TILEGX_OPC_MNZ, TILEGX_OPC_MULAX,
1196 TILEGX_OPC_MULA_HS_HS, TILEGX_OPC_MULA_HS_HU, TILEGX_OPC_MULA_HS_LS,
1197 TILEGX_OPC_MULA_HS_LU, TILEGX_OPC_MULA_HU_HU, TILEGX_OPC_MULA_HU_LS,
1198 BITFIELD(18, 4) /* index 654 */,
1199 TILEGX_OPC_MULA_HU_LU, TILEGX_OPC_MULA_LS_LS, TILEGX_OPC_MULA_LS_LU,
1200 TILEGX_OPC_MULA_LU_LU, TILEGX_OPC_MULX, TILEGX_OPC_MUL_HS_HS,
1201 TILEGX_OPC_MUL_HS_HU, TILEGX_OPC_MUL_HS_LS, TILEGX_OPC_MUL_HS_LU,
1202 TILEGX_OPC_MUL_HU_HU, TILEGX_OPC_MUL_HU_LS, TILEGX_OPC_MUL_HU_LU,
1203 TILEGX_OPC_MUL_LS_LS, TILEGX_OPC_MUL_LS_LU, TILEGX_OPC_MUL_LU_LU,
1204 TILEGX_OPC_MZ,
1205 BITFIELD(18, 4) /* index 671 */,
1206 TILEGX_OPC_NOR, CHILD(688), TILEGX_OPC_ROTL, TILEGX_OPC_SHL1ADDX,
1207 TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADDX, TILEGX_OPC_SHL2ADD,
1208 TILEGX_OPC_SHL3ADDX, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHLX, TILEGX_OPC_SHL,
1209 TILEGX_OPC_SHRS, TILEGX_OPC_SHRUX, TILEGX_OPC_SHRU, TILEGX_OPC_SHUFFLEBYTES,
1210 TILEGX_OPC_SUBXSC,
1211 BITFIELD(12, 2) /* index 688 */,
1212 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(693),
1213 BITFIELD(14, 2) /* index 693 */,
1214 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(698),
1215 BITFIELD(16, 2) /* index 698 */,
1216 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE,
1217 BITFIELD(18, 4) /* index 703 */,
1218 TILEGX_OPC_SUBX, TILEGX_OPC_SUB, CHILD(720), TILEGX_OPC_V1ADDUC,
1219 TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADIFFU, TILEGX_OPC_V1AVGU,
1220 TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLEU,
1221 TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPNE,
1222 TILEGX_OPC_V1DDOTPUSA, TILEGX_OPC_V1DDOTPUS, TILEGX_OPC_V1DOTPA,
1223 BITFIELD(12, 4) /* index 720 */,
1224 TILEGX_OPC_NONE, CHILD(737), CHILD(742), CHILD(747), CHILD(752), CHILD(757),
1225 CHILD(762), CHILD(767), CHILD(772), CHILD(777), CHILD(782), CHILD(787),
1226 CHILD(792), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1227 BITFIELD(16, 2) /* index 737 */,
1228 TILEGX_OPC_CLZ, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1229 BITFIELD(16, 2) /* index 742 */,
1230 TILEGX_OPC_CTZ, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1231 BITFIELD(16, 2) /* index 747 */,
1232 TILEGX_OPC_FNOP, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1233 BITFIELD(16, 2) /* index 752 */,
1234 TILEGX_OPC_FSINGLE_PACK1, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1235 BITFIELD(16, 2) /* index 757 */,
1236 TILEGX_OPC_NOP, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1237 BITFIELD(16, 2) /* index 762 */,
1238 TILEGX_OPC_PCNT, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1239 BITFIELD(16, 2) /* index 767 */,
1240 TILEGX_OPC_REVBITS, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1241 BITFIELD(16, 2) /* index 772 */,
1242 TILEGX_OPC_REVBYTES, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1243 BITFIELD(16, 2) /* index 777 */,
1244 TILEGX_OPC_TBLIDXB0, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1245 BITFIELD(16, 2) /* index 782 */,
1246 TILEGX_OPC_TBLIDXB1, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1247 BITFIELD(16, 2) /* index 787 */,
1248 TILEGX_OPC_TBLIDXB2, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1249 BITFIELD(16, 2) /* index 792 */,
1250 TILEGX_OPC_TBLIDXB3, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1251 BITFIELD(18, 4) /* index 797 */,
1252 TILEGX_OPC_V1DOTPUSA, TILEGX_OPC_V1DOTPUS, TILEGX_OPC_V1DOTP,
1253 TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1MAXU,
1254 TILEGX_OPC_V1MINU, TILEGX_OPC_V1MNZ, TILEGX_OPC_V1MULTU, TILEGX_OPC_V1MULUS,
1255 TILEGX_OPC_V1MULU, TILEGX_OPC_V1MZ, TILEGX_OPC_V1SADAU, TILEGX_OPC_V1SADU,
1256 TILEGX_OPC_V1SHL, TILEGX_OPC_V1SHRS,
1257 BITFIELD(18, 4) /* index 814 */,
1258 TILEGX_OPC_V1SHRU, TILEGX_OPC_V1SUBUC, TILEGX_OPC_V1SUB, TILEGX_OPC_V2ADDSC,
1259 TILEGX_OPC_V2ADD, TILEGX_OPC_V2ADIFFS, TILEGX_OPC_V2AVGS,
1260 TILEGX_OPC_V2CMPEQ, TILEGX_OPC_V2CMPLES, TILEGX_OPC_V2CMPLEU,
1261 TILEGX_OPC_V2CMPLTS, TILEGX_OPC_V2CMPLTU, TILEGX_OPC_V2CMPNE,
1262 TILEGX_OPC_V2DOTPA, TILEGX_OPC_V2DOTP, TILEGX_OPC_V2INT_H,
1263 BITFIELD(18, 4) /* index 831 */,
1264 TILEGX_OPC_V2INT_L, TILEGX_OPC_V2MAXS, TILEGX_OPC_V2MINS, TILEGX_OPC_V2MNZ,
1265 TILEGX_OPC_V2MULFSC, TILEGX_OPC_V2MULS, TILEGX_OPC_V2MULTS, TILEGX_OPC_V2MZ,
1266 TILEGX_OPC_V2PACKH, TILEGX_OPC_V2PACKL, TILEGX_OPC_V2PACKUC,
1267 TILEGX_OPC_V2SADAS, TILEGX_OPC_V2SADAU, TILEGX_OPC_V2SADS,
1268 TILEGX_OPC_V2SADU, TILEGX_OPC_V2SHLSC,
1269 BITFIELD(18, 4) /* index 848 */,
1270 TILEGX_OPC_V2SHL, TILEGX_OPC_V2SHRS, TILEGX_OPC_V2SHRU, TILEGX_OPC_V2SUBSC,
1271 TILEGX_OPC_V2SUB, TILEGX_OPC_V4ADDSC, TILEGX_OPC_V4ADD, TILEGX_OPC_V4INT_H,
1272 TILEGX_OPC_V4INT_L, TILEGX_OPC_V4PACKSC, TILEGX_OPC_V4SHLSC,
1273 TILEGX_OPC_V4SHL, TILEGX_OPC_V4SHRS, TILEGX_OPC_V4SHRU, TILEGX_OPC_V4SUBSC,
1274 TILEGX_OPC_V4SUB,
1275 BITFIELD(18, 3) /* index 865 */,
1276 CHILD(874), CHILD(877), CHILD(880), CHILD(883), CHILD(886), TILEGX_OPC_NONE,
1277 TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1278 BITFIELD(21, 1) /* index 874 */,
1279 TILEGX_OPC_XOR, TILEGX_OPC_NONE,
1280 BITFIELD(21, 1) /* index 877 */,
1281 TILEGX_OPC_V1DDOTPUA, TILEGX_OPC_NONE,
1282 BITFIELD(21, 1) /* index 880 */,
1283 TILEGX_OPC_V1DDOTPU, TILEGX_OPC_NONE,
1284 BITFIELD(21, 1) /* index 883 */,
1285 TILEGX_OPC_V1DOTPUA, TILEGX_OPC_NONE,
1286 BITFIELD(21, 1) /* index 886 */,
1287 TILEGX_OPC_V1DOTPU, TILEGX_OPC_NONE,
1288 BITFIELD(18, 4) /* index 889 */,
1289 TILEGX_OPC_NONE, TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHLXI,
1290 TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI, TILEGX_OPC_SHRUXI, TILEGX_OPC_V1SHLI,
1291 TILEGX_OPC_V1SHRSI, TILEGX_OPC_V1SHRUI, TILEGX_OPC_V2SHLI,
1292 TILEGX_OPC_V2SHRSI, TILEGX_OPC_V2SHRUI, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1293 TILEGX_OPC_NONE,
1294 BITFIELD(0, 2) /* index 906 */,
1295 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1296 CHILD(911),
1297 BITFIELD(2, 2) /* index 911 */,
1298 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1299 CHILD(916),
1300 BITFIELD(4, 2) /* index 916 */,
1301 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1302 CHILD(921),
1303 BITFIELD(6, 2) /* index 921 */,
1304 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1305 CHILD(926),
1306 BITFIELD(8, 2) /* index 926 */,
1307 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1308 CHILD(931),
1309 BITFIELD(10, 2) /* index 931 */,
1310 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1311 TILEGX_OPC_INFOL,
1312};
1313
1314static const unsigned short decode_X1_fsm[1206] =
1315{
1316 BITFIELD(53, 9) /* index 0 */,
1317 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1318 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1319 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1320 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1321 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1322 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1323 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1324 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1325 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1326 CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513), CHILD(513),
1327 CHILD(513), CHILD(513), CHILD(513), CHILD(513), TILEGX_OPC_ADDXLI,
1328 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1329 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1330 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1331 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1332 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1333 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1334 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1335 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1336 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1337 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1338 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1339 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1340 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1341 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1342 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI,
1343 TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_ADDXLI, TILEGX_OPC_NONE,
1344 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1345 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1346 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1347 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1348 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1349 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1350 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1351 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_BEQZT,
1352 TILEGX_OPC_BEQZT, TILEGX_OPC_BEQZ, TILEGX_OPC_BEQZ, TILEGX_OPC_BGEZT,
1353 TILEGX_OPC_BGEZT, TILEGX_OPC_BGEZ, TILEGX_OPC_BGEZ, TILEGX_OPC_BGTZT,
1354 TILEGX_OPC_BGTZT, TILEGX_OPC_BGTZ, TILEGX_OPC_BGTZ, TILEGX_OPC_BLBCT,
1355 TILEGX_OPC_BLBCT, TILEGX_OPC_BLBC, TILEGX_OPC_BLBC, TILEGX_OPC_BLBST,
1356 TILEGX_OPC_BLBST, TILEGX_OPC_BLBS, TILEGX_OPC_BLBS, TILEGX_OPC_BLEZT,
1357 TILEGX_OPC_BLEZT, TILEGX_OPC_BLEZ, TILEGX_OPC_BLEZ, TILEGX_OPC_BLTZT,
1358 TILEGX_OPC_BLTZT, TILEGX_OPC_BLTZ, TILEGX_OPC_BLTZ, TILEGX_OPC_BNEZT,
1359 TILEGX_OPC_BNEZT, TILEGX_OPC_BNEZ, TILEGX_OPC_BNEZ, CHILD(528), CHILD(578),
1360 CHILD(598), CHILD(663), CHILD(683), CHILD(688), CHILD(693), CHILD(698),
1361 CHILD(703), CHILD(708), CHILD(713), CHILD(718), TILEGX_OPC_NONE,
1362 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1363 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1364 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1365 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1366 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1367 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1368 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1369 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1370 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1371 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1372 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1373 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1374 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_JAL,
1375 TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
1376 TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
1377 TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
1378 TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
1379 TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
1380 TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
1381 TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL,
1382 TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_JAL, TILEGX_OPC_J, TILEGX_OPC_J,
1383 TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
1384 TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
1385 TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
1386 TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
1387 TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
1388 TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J, TILEGX_OPC_J,
1389 CHILD(723), CHILD(740), CHILD(772), CHILD(789), CHILD(1108), CHILD(1125),
1390 CHILD(1142), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1391 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1392 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1393 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1394 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1395 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1396 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1397 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1398 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1399 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1400 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1401 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1402 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1403 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1404 TILEGX_OPC_NONE, TILEGX_OPC_NONE, CHILD(1159), TILEGX_OPC_NONE,
1405 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1406 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1407 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1408 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1409 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1410 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1411 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1412 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1413 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1414 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1415 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1416 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1417 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1418 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1419 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1420 TILEGX_OPC_NONE, TILEGX_OPC_NONE, CHILD(1176), CHILD(1176), CHILD(1176),
1421 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1422 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1423 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1424 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1425 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1426 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1427 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1428 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1429 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1430 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1431 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1432 CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176), CHILD(1176),
1433 CHILD(1176),
1434 BITFIELD(37, 2) /* index 513 */,
1435 TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(518),
1436 BITFIELD(39, 2) /* index 518 */,
1437 TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, CHILD(523),
1438 BITFIELD(41, 2) /* index 523 */,
1439 TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_ADDLI, TILEGX_OPC_MOVELI,
1440 BITFIELD(51, 2) /* index 528 */,
1441 TILEGX_OPC_NONE, CHILD(533), TILEGX_OPC_ADDXI, CHILD(548),
1442 BITFIELD(37, 2) /* index 533 */,
1443 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(538),
1444 BITFIELD(39, 2) /* index 538 */,
1445 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(543),
1446 BITFIELD(41, 2) /* index 543 */,
1447 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI,
1448 BITFIELD(31, 2) /* index 548 */,
1449 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(553),
1450 BITFIELD(33, 2) /* index 553 */,
1451 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(558),
1452 BITFIELD(35, 2) /* index 558 */,
1453 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(563),
1454 BITFIELD(37, 2) /* index 563 */,
1455 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(568),
1456 BITFIELD(39, 2) /* index 568 */,
1457 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(573),
1458 BITFIELD(41, 2) /* index 573 */,
1459 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO,
1460 BITFIELD(51, 2) /* index 578 */,
1461 TILEGX_OPC_CMPEQI, TILEGX_OPC_CMPLTSI, TILEGX_OPC_CMPLTUI, CHILD(583),
1462 BITFIELD(31, 2) /* index 583 */,
1463 TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, CHILD(588),
1464 BITFIELD(33, 2) /* index 588 */,
1465 TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, CHILD(593),
1466 BITFIELD(35, 2) /* index 593 */,
1467 TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD, TILEGX_OPC_LD1S_ADD,
1468 TILEGX_OPC_PREFETCH_ADD_L1_FAULT,
1469 BITFIELD(51, 2) /* index 598 */,
1470 CHILD(603), CHILD(618), CHILD(633), CHILD(648),
1471 BITFIELD(31, 2) /* index 603 */,
1472 TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, CHILD(608),
1473 BITFIELD(33, 2) /* index 608 */,
1474 TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, CHILD(613),
1475 BITFIELD(35, 2) /* index 613 */,
1476 TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD, TILEGX_OPC_LD1U_ADD,
1477 TILEGX_OPC_PREFETCH_ADD_L1,
1478 BITFIELD(31, 2) /* index 618 */,
1479 TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, CHILD(623),
1480 BITFIELD(33, 2) /* index 623 */,
1481 TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, CHILD(628),
1482 BITFIELD(35, 2) /* index 628 */,
1483 TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD, TILEGX_OPC_LD2S_ADD,
1484 TILEGX_OPC_PREFETCH_ADD_L2_FAULT,
1485 BITFIELD(31, 2) /* index 633 */,
1486 TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, CHILD(638),
1487 BITFIELD(33, 2) /* index 638 */,
1488 TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, CHILD(643),
1489 BITFIELD(35, 2) /* index 643 */,
1490 TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD, TILEGX_OPC_LD2U_ADD,
1491 TILEGX_OPC_PREFETCH_ADD_L2,
1492 BITFIELD(31, 2) /* index 648 */,
1493 TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, CHILD(653),
1494 BITFIELD(33, 2) /* index 653 */,
1495 TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, CHILD(658),
1496 BITFIELD(35, 2) /* index 658 */,
1497 TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD, TILEGX_OPC_LD4S_ADD,
1498 TILEGX_OPC_PREFETCH_ADD_L3_FAULT,
1499 BITFIELD(51, 2) /* index 663 */,
1500 CHILD(668), TILEGX_OPC_LDNT1S_ADD, TILEGX_OPC_LDNT1U_ADD,
1501 TILEGX_OPC_LDNT2S_ADD,
1502 BITFIELD(31, 2) /* index 668 */,
1503 TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, CHILD(673),
1504 BITFIELD(33, 2) /* index 673 */,
1505 TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, CHILD(678),
1506 BITFIELD(35, 2) /* index 678 */,
1507 TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD, TILEGX_OPC_LD4U_ADD,
1508 TILEGX_OPC_PREFETCH_ADD_L3,
1509 BITFIELD(51, 2) /* index 683 */,
1510 TILEGX_OPC_LDNT2U_ADD, TILEGX_OPC_LDNT4S_ADD, TILEGX_OPC_LDNT4U_ADD,
1511 TILEGX_OPC_LDNT_ADD,
1512 BITFIELD(51, 2) /* index 688 */,
1513 TILEGX_OPC_LD_ADD, TILEGX_OPC_LDNA_ADD, TILEGX_OPC_MFSPR, TILEGX_OPC_MTSPR,
1514 BITFIELD(51, 2) /* index 693 */,
1515 TILEGX_OPC_ORI, TILEGX_OPC_ST1_ADD, TILEGX_OPC_ST2_ADD, TILEGX_OPC_ST4_ADD,
1516 BITFIELD(51, 2) /* index 698 */,
1517 TILEGX_OPC_STNT1_ADD, TILEGX_OPC_STNT2_ADD, TILEGX_OPC_STNT4_ADD,
1518 TILEGX_OPC_STNT_ADD,
1519 BITFIELD(51, 2) /* index 703 */,
1520 TILEGX_OPC_ST_ADD, TILEGX_OPC_V1ADDI, TILEGX_OPC_V1CMPEQI,
1521 TILEGX_OPC_V1CMPLTSI,
1522 BITFIELD(51, 2) /* index 708 */,
1523 TILEGX_OPC_V1CMPLTUI, TILEGX_OPC_V1MAXUI, TILEGX_OPC_V1MINUI,
1524 TILEGX_OPC_V2ADDI,
1525 BITFIELD(51, 2) /* index 713 */,
1526 TILEGX_OPC_V2CMPEQI, TILEGX_OPC_V2CMPLTSI, TILEGX_OPC_V2CMPLTUI,
1527 TILEGX_OPC_V2MAXSI,
1528 BITFIELD(51, 2) /* index 718 */,
1529 TILEGX_OPC_V2MINSI, TILEGX_OPC_XORI, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1530 BITFIELD(49, 4) /* index 723 */,
1531 TILEGX_OPC_NONE, TILEGX_OPC_ADDXSC, TILEGX_OPC_ADDX, TILEGX_OPC_ADD,
1532 TILEGX_OPC_AND, TILEGX_OPC_CMPEQ, TILEGX_OPC_CMPEXCH4, TILEGX_OPC_CMPEXCH,
1533 TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU,
1534 TILEGX_OPC_CMPNE, TILEGX_OPC_DBLALIGN2, TILEGX_OPC_DBLALIGN4,
1535 TILEGX_OPC_DBLALIGN6,
1536 BITFIELD(49, 4) /* index 740 */,
1537 TILEGX_OPC_EXCH4, TILEGX_OPC_EXCH, TILEGX_OPC_FETCHADD4,
1538 TILEGX_OPC_FETCHADDGEZ4, TILEGX_OPC_FETCHADDGEZ, TILEGX_OPC_FETCHADD,
1539 TILEGX_OPC_FETCHAND4, TILEGX_OPC_FETCHAND, TILEGX_OPC_FETCHOR4,
1540 TILEGX_OPC_FETCHOR, TILEGX_OPC_MNZ, TILEGX_OPC_MZ, TILEGX_OPC_NOR,
1541 CHILD(757), TILEGX_OPC_ROTL, TILEGX_OPC_SHL1ADDX,
1542 BITFIELD(43, 2) /* index 757 */,
1543 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(762),
1544 BITFIELD(45, 2) /* index 762 */,
1545 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(767),
1546 BITFIELD(47, 2) /* index 767 */,
1547 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE,
1548 BITFIELD(49, 4) /* index 772 */,
1549 TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADDX, TILEGX_OPC_SHL2ADD,
1550 TILEGX_OPC_SHL3ADDX, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHLX, TILEGX_OPC_SHL,
1551 TILEGX_OPC_SHRS, TILEGX_OPC_SHRUX, TILEGX_OPC_SHRU, TILEGX_OPC_ST1,
1552 TILEGX_OPC_ST2, TILEGX_OPC_ST4, TILEGX_OPC_STNT1, TILEGX_OPC_STNT2,
1553 TILEGX_OPC_STNT4,
1554 BITFIELD(46, 7) /* index 789 */,
1555 TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT,
1556 TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT, TILEGX_OPC_STNT,
1557 TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST,
1558 TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_ST, TILEGX_OPC_SUBXSC,
1559 TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC,
1560 TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBXSC, TILEGX_OPC_SUBX,
1561 TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX,
1562 TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUBX, TILEGX_OPC_SUB,
1563 TILEGX_OPC_SUB, TILEGX_OPC_SUB, TILEGX_OPC_SUB, TILEGX_OPC_SUB,
1564 TILEGX_OPC_SUB, TILEGX_OPC_SUB, TILEGX_OPC_SUB, CHILD(918), CHILD(927),
1565 CHILD(1006), CHILD(1090), CHILD(1099), TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1566 TILEGX_OPC_NONE, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC,
1567 TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC,
1568 TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADDUC, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD,
1569 TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD,
1570 TILEGX_OPC_V1ADD, TILEGX_OPC_V1ADD, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ,
1571 TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ,
1572 TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ, TILEGX_OPC_V1CMPEQ,
1573 TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES,
1574 TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES,
1575 TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLES, TILEGX_OPC_V1CMPLEU,
1576 TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU,
1577 TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLEU,
1578 TILEGX_OPC_V1CMPLEU, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS,
1579 TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS,
1580 TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS, TILEGX_OPC_V1CMPLTS,
1581 TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU,
1582 TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU,
1583 TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPLTU, TILEGX_OPC_V1CMPNE,
1584 TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE,
1585 TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1CMPNE,
1586 TILEGX_OPC_V1CMPNE, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H,
1587 TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H,
1588 TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H, TILEGX_OPC_V1INT_H,
1589 TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L,
1590 TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L,
1591 TILEGX_OPC_V1INT_L, TILEGX_OPC_V1INT_L,
1592 BITFIELD(43, 3) /* index 918 */,
1593 TILEGX_OPC_NONE, TILEGX_OPC_DRAIN, TILEGX_OPC_DTLBPR, TILEGX_OPC_FINV,
1594 TILEGX_OPC_FLUSHWB, TILEGX_OPC_FLUSH, TILEGX_OPC_FNOP, TILEGX_OPC_ICOH,
1595 BITFIELD(43, 3) /* index 927 */,
1596 CHILD(936), TILEGX_OPC_INV, TILEGX_OPC_IRET, TILEGX_OPC_JALRP,
1597 TILEGX_OPC_JALR, TILEGX_OPC_JRP, TILEGX_OPC_JR, CHILD(991),
1598 BITFIELD(31, 2) /* index 936 */,
1599 CHILD(941), CHILD(966), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
1600 BITFIELD(33, 2) /* index 941 */,
1601 TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_ILL, CHILD(946),
1602 BITFIELD(35, 2) /* index 946 */,
1603 TILEGX_OPC_ILL, CHILD(951), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
1604 BITFIELD(37, 2) /* index 951 */,
1605 TILEGX_OPC_ILL, CHILD(956), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
1606 BITFIELD(39, 2) /* index 956 */,
1607 TILEGX_OPC_ILL, CHILD(961), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
1608 BITFIELD(41, 2) /* index 961 */,
1609 TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_BPT, TILEGX_OPC_ILL,
1610 BITFIELD(33, 2) /* index 966 */,
1611 TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_ILL, CHILD(971),
1612 BITFIELD(35, 2) /* index 971 */,
1613 TILEGX_OPC_ILL, CHILD(976), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
1614 BITFIELD(37, 2) /* index 976 */,
1615 TILEGX_OPC_ILL, CHILD(981), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
1616 BITFIELD(39, 2) /* index 981 */,
1617 TILEGX_OPC_ILL, CHILD(986), TILEGX_OPC_ILL, TILEGX_OPC_ILL,
1618 BITFIELD(41, 2) /* index 986 */,
1619 TILEGX_OPC_ILL, TILEGX_OPC_ILL, TILEGX_OPC_RAISE, TILEGX_OPC_ILL,
1620 BITFIELD(31, 2) /* index 991 */,
1621 TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, CHILD(996),
1622 BITFIELD(33, 2) /* index 996 */,
1623 TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, CHILD(1001),
1624 BITFIELD(35, 2) /* index 1001 */,
1625 TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S,
1626 TILEGX_OPC_PREFETCH_L1_FAULT,
1627 BITFIELD(43, 3) /* index 1006 */,
1628 CHILD(1015), CHILD(1030), CHILD(1045), CHILD(1060), CHILD(1075),
1629 TILEGX_OPC_LDNA, TILEGX_OPC_LDNT1S, TILEGX_OPC_LDNT1U,
1630 BITFIELD(31, 2) /* index 1015 */,
1631 TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, CHILD(1020),
1632 BITFIELD(33, 2) /* index 1020 */,
1633 TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, CHILD(1025),
1634 BITFIELD(35, 2) /* index 1025 */,
1635 TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_PREFETCH,
1636 BITFIELD(31, 2) /* index 1030 */,
1637 TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, CHILD(1035),
1638 BITFIELD(33, 2) /* index 1035 */,
1639 TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, CHILD(1040),
1640 BITFIELD(35, 2) /* index 1040 */,
1641 TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S,
1642 TILEGX_OPC_PREFETCH_L2_FAULT,
1643 BITFIELD(31, 2) /* index 1045 */,
1644 TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, CHILD(1050),
1645 BITFIELD(33, 2) /* index 1050 */,
1646 TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, CHILD(1055),
1647 BITFIELD(35, 2) /* index 1055 */,
1648 TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_PREFETCH_L2,
1649 BITFIELD(31, 2) /* index 1060 */,
1650 TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(1065),
1651 BITFIELD(33, 2) /* index 1065 */,
1652 TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(1070),
1653 BITFIELD(35, 2) /* index 1070 */,
1654 TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S,
1655 TILEGX_OPC_PREFETCH_L3_FAULT,
1656 BITFIELD(31, 2) /* index 1075 */,
1657 TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(1080),
1658 BITFIELD(33, 2) /* index 1080 */,
1659 TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(1085),
1660 BITFIELD(35, 2) /* index 1085 */,
1661 TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_PREFETCH_L3,
1662 BITFIELD(43, 3) /* index 1090 */,
1663 TILEGX_OPC_LDNT2S, TILEGX_OPC_LDNT2U, TILEGX_OPC_LDNT4S, TILEGX_OPC_LDNT4U,
1664 TILEGX_OPC_LDNT, TILEGX_OPC_LD, TILEGX_OPC_LNK, TILEGX_OPC_MF,
1665 BITFIELD(43, 3) /* index 1099 */,
1666 TILEGX_OPC_NAP, TILEGX_OPC_NOP, TILEGX_OPC_SWINT0, TILEGX_OPC_SWINT1,
1667 TILEGX_OPC_SWINT2, TILEGX_OPC_SWINT3, TILEGX_OPC_WH64, TILEGX_OPC_NONE,
1668 BITFIELD(49, 4) /* index 1108 */,
1669 TILEGX_OPC_V1MAXU, TILEGX_OPC_V1MINU, TILEGX_OPC_V1MNZ, TILEGX_OPC_V1MZ,
1670 TILEGX_OPC_V1SHL, TILEGX_OPC_V1SHRS, TILEGX_OPC_V1SHRU, TILEGX_OPC_V1SUBUC,
1671 TILEGX_OPC_V1SUB, TILEGX_OPC_V2ADDSC, TILEGX_OPC_V2ADD, TILEGX_OPC_V2CMPEQ,
1672 TILEGX_OPC_V2CMPLES, TILEGX_OPC_V2CMPLEU, TILEGX_OPC_V2CMPLTS,
1673 TILEGX_OPC_V2CMPLTU,
1674 BITFIELD(49, 4) /* index 1125 */,
1675 TILEGX_OPC_V2CMPNE, TILEGX_OPC_V2INT_H, TILEGX_OPC_V2INT_L,
1676 TILEGX_OPC_V2MAXS, TILEGX_OPC_V2MINS, TILEGX_OPC_V2MNZ, TILEGX_OPC_V2MZ,
1677 TILEGX_OPC_V2PACKH, TILEGX_OPC_V2PACKL, TILEGX_OPC_V2PACKUC,
1678 TILEGX_OPC_V2SHLSC, TILEGX_OPC_V2SHL, TILEGX_OPC_V2SHRS, TILEGX_OPC_V2SHRU,
1679 TILEGX_OPC_V2SUBSC, TILEGX_OPC_V2SUB,
1680 BITFIELD(49, 4) /* index 1142 */,
1681 TILEGX_OPC_V4ADDSC, TILEGX_OPC_V4ADD, TILEGX_OPC_V4INT_H,
1682 TILEGX_OPC_V4INT_L, TILEGX_OPC_V4PACKSC, TILEGX_OPC_V4SHLSC,
1683 TILEGX_OPC_V4SHL, TILEGX_OPC_V4SHRS, TILEGX_OPC_V4SHRU, TILEGX_OPC_V4SUBSC,
1684 TILEGX_OPC_V4SUB, TILEGX_OPC_XOR, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1685 TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1686 BITFIELD(49, 4) /* index 1159 */,
1687 TILEGX_OPC_NONE, TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHLXI,
1688 TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI, TILEGX_OPC_SHRUXI, TILEGX_OPC_V1SHLI,
1689 TILEGX_OPC_V1SHRSI, TILEGX_OPC_V1SHRUI, TILEGX_OPC_V2SHLI,
1690 TILEGX_OPC_V2SHRSI, TILEGX_OPC_V2SHRUI, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1691 TILEGX_OPC_NONE,
1692 BITFIELD(31, 2) /* index 1176 */,
1693 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1694 CHILD(1181),
1695 BITFIELD(33, 2) /* index 1181 */,
1696 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1697 CHILD(1186),
1698 BITFIELD(35, 2) /* index 1186 */,
1699 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1700 CHILD(1191),
1701 BITFIELD(37, 2) /* index 1191 */,
1702 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1703 CHILD(1196),
1704 BITFIELD(39, 2) /* index 1196 */,
1705 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1706 CHILD(1201),
1707 BITFIELD(41, 2) /* index 1201 */,
1708 TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI, TILEGX_OPC_SHL16INSLI,
1709 TILEGX_OPC_INFOL,
1710};
1711
1712static const unsigned short decode_Y0_fsm[178] =
1713{
1714 BITFIELD(27, 4) /* index 0 */,
1715 CHILD(17), TILEGX_OPC_ADDXI, CHILD(32), TILEGX_OPC_CMPEQI,
1716 TILEGX_OPC_CMPLTSI, CHILD(62), CHILD(67), CHILD(118), CHILD(123),
1717 CHILD(128), CHILD(133), CHILD(153), CHILD(158), CHILD(163), CHILD(168),
1718 CHILD(173),
1719 BITFIELD(6, 2) /* index 17 */,
1720 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(22),
1721 BITFIELD(8, 2) /* index 22 */,
1722 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(27),
1723 BITFIELD(10, 2) /* index 27 */,
1724 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI,
1725 BITFIELD(0, 2) /* index 32 */,
1726 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(37),
1727 BITFIELD(2, 2) /* index 37 */,
1728 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(42),
1729 BITFIELD(4, 2) /* index 42 */,
1730 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(47),
1731 BITFIELD(6, 2) /* index 47 */,
1732 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(52),
1733 BITFIELD(8, 2) /* index 52 */,
1734 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(57),
1735 BITFIELD(10, 2) /* index 57 */,
1736 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO,
1737 BITFIELD(18, 2) /* index 62 */,
1738 TILEGX_OPC_ADDX, TILEGX_OPC_ADD, TILEGX_OPC_SUBX, TILEGX_OPC_SUB,
1739 BITFIELD(15, 5) /* index 67 */,
1740 TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD,
1741 TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD,
1742 TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADD,
1743 TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD,
1744 TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD,
1745 TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD,
1746 TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD,
1747 TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, CHILD(100),
1748 CHILD(109), TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1749 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1750 BITFIELD(12, 3) /* index 100 */,
1751 TILEGX_OPC_NONE, TILEGX_OPC_CLZ, TILEGX_OPC_CTZ, TILEGX_OPC_FNOP,
1752 TILEGX_OPC_FSINGLE_PACK1, TILEGX_OPC_NOP, TILEGX_OPC_PCNT,
1753 TILEGX_OPC_REVBITS,
1754 BITFIELD(12, 3) /* index 109 */,
1755 TILEGX_OPC_REVBYTES, TILEGX_OPC_TBLIDXB0, TILEGX_OPC_TBLIDXB1,
1756 TILEGX_OPC_TBLIDXB2, TILEGX_OPC_TBLIDXB3, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1757 TILEGX_OPC_NONE,
1758 BITFIELD(18, 2) /* index 118 */,
1759 TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU,
1760 BITFIELD(18, 2) /* index 123 */,
1761 TILEGX_OPC_CMPEQ, TILEGX_OPC_CMPNE, TILEGX_OPC_MULAX, TILEGX_OPC_MULX,
1762 BITFIELD(18, 2) /* index 128 */,
1763 TILEGX_OPC_CMOVEQZ, TILEGX_OPC_CMOVNEZ, TILEGX_OPC_MNZ, TILEGX_OPC_MZ,
1764 BITFIELD(18, 2) /* index 133 */,
1765 TILEGX_OPC_AND, TILEGX_OPC_NOR, CHILD(138), TILEGX_OPC_XOR,
1766 BITFIELD(12, 2) /* index 138 */,
1767 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(143),
1768 BITFIELD(14, 2) /* index 143 */,
1769 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(148),
1770 BITFIELD(16, 2) /* index 148 */,
1771 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE,
1772 BITFIELD(18, 2) /* index 153 */,
1773 TILEGX_OPC_ROTL, TILEGX_OPC_SHL, TILEGX_OPC_SHRS, TILEGX_OPC_SHRU,
1774 BITFIELD(18, 2) /* index 158 */,
1775 TILEGX_OPC_NONE, TILEGX_OPC_SHL1ADDX, TILEGX_OPC_SHL2ADDX,
1776 TILEGX_OPC_SHL3ADDX,
1777 BITFIELD(18, 2) /* index 163 */,
1778 TILEGX_OPC_MUL_HS_HS, TILEGX_OPC_MUL_HU_HU, TILEGX_OPC_MUL_LS_LS,
1779 TILEGX_OPC_MUL_LU_LU,
1780 BITFIELD(18, 2) /* index 168 */,
1781 TILEGX_OPC_MULA_HS_HS, TILEGX_OPC_MULA_HU_HU, TILEGX_OPC_MULA_LS_LS,
1782 TILEGX_OPC_MULA_LU_LU,
1783 BITFIELD(18, 2) /* index 173 */,
1784 TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI,
1785};
1786
1787static const unsigned short decode_Y1_fsm[167] =
1788{
1789 BITFIELD(58, 4) /* index 0 */,
1790 TILEGX_OPC_NONE, CHILD(17), TILEGX_OPC_ADDXI, CHILD(32), TILEGX_OPC_CMPEQI,
1791 TILEGX_OPC_CMPLTSI, CHILD(62), CHILD(67), CHILD(117), CHILD(122),
1792 CHILD(127), CHILD(132), CHILD(152), CHILD(157), CHILD(162), TILEGX_OPC_NONE,
1793 BITFIELD(37, 2) /* index 17 */,
1794 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(22),
1795 BITFIELD(39, 2) /* index 22 */,
1796 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, CHILD(27),
1797 BITFIELD(41, 2) /* index 27 */,
1798 TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_ADDI, TILEGX_OPC_MOVEI,
1799 BITFIELD(31, 2) /* index 32 */,
1800 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(37),
1801 BITFIELD(33, 2) /* index 37 */,
1802 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(42),
1803 BITFIELD(35, 2) /* index 42 */,
1804 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(47),
1805 BITFIELD(37, 2) /* index 47 */,
1806 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(52),
1807 BITFIELD(39, 2) /* index 52 */,
1808 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, CHILD(57),
1809 BITFIELD(41, 2) /* index 57 */,
1810 TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_ANDI, TILEGX_OPC_INFO,
1811 BITFIELD(49, 2) /* index 62 */,
1812 TILEGX_OPC_ADDX, TILEGX_OPC_ADD, TILEGX_OPC_SUBX, TILEGX_OPC_SUB,
1813 BITFIELD(47, 4) /* index 67 */,
1814 TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL1ADD,
1815 TILEGX_OPC_SHL1ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD,
1816 TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL2ADD, TILEGX_OPC_SHL3ADD,
1817 TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, TILEGX_OPC_SHL3ADD, CHILD(84),
1818 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_NONE,
1819 BITFIELD(43, 3) /* index 84 */,
1820 CHILD(93), CHILD(96), CHILD(99), CHILD(102), CHILD(105), CHILD(108),
1821 CHILD(111), CHILD(114),
1822 BITFIELD(46, 1) /* index 93 */,
1823 TILEGX_OPC_NONE, TILEGX_OPC_FNOP,
1824 BITFIELD(46, 1) /* index 96 */,
1825 TILEGX_OPC_NONE, TILEGX_OPC_ILL,
1826 BITFIELD(46, 1) /* index 99 */,
1827 TILEGX_OPC_NONE, TILEGX_OPC_JALRP,
1828 BITFIELD(46, 1) /* index 102 */,
1829 TILEGX_OPC_NONE, TILEGX_OPC_JALR,
1830 BITFIELD(46, 1) /* index 105 */,
1831 TILEGX_OPC_NONE, TILEGX_OPC_JRP,
1832 BITFIELD(46, 1) /* index 108 */,
1833 TILEGX_OPC_NONE, TILEGX_OPC_JR,
1834 BITFIELD(46, 1) /* index 111 */,
1835 TILEGX_OPC_NONE, TILEGX_OPC_LNK,
1836 BITFIELD(46, 1) /* index 114 */,
1837 TILEGX_OPC_NONE, TILEGX_OPC_NOP,
1838 BITFIELD(49, 2) /* index 117 */,
1839 TILEGX_OPC_CMPLES, TILEGX_OPC_CMPLEU, TILEGX_OPC_CMPLTS, TILEGX_OPC_CMPLTU,
1840 BITFIELD(49, 2) /* index 122 */,
1841 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_CMPEQ, TILEGX_OPC_CMPNE,
1842 BITFIELD(49, 2) /* index 127 */,
1843 TILEGX_OPC_NONE, TILEGX_OPC_NONE, TILEGX_OPC_MNZ, TILEGX_OPC_MZ,
1844 BITFIELD(49, 2) /* index 132 */,
1845 TILEGX_OPC_AND, TILEGX_OPC_NOR, CHILD(137), TILEGX_OPC_XOR,
1846 BITFIELD(43, 2) /* index 137 */,
1847 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(142),
1848 BITFIELD(45, 2) /* index 142 */,
1849 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, CHILD(147),
1850 BITFIELD(47, 2) /* index 147 */,
1851 TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_OR, TILEGX_OPC_MOVE,
1852 BITFIELD(49, 2) /* index 152 */,
1853 TILEGX_OPC_ROTL, TILEGX_OPC_SHL, TILEGX_OPC_SHRS, TILEGX_OPC_SHRU,
1854 BITFIELD(49, 2) /* index 157 */,
1855 TILEGX_OPC_NONE, TILEGX_OPC_SHL1ADDX, TILEGX_OPC_SHL2ADDX,
1856 TILEGX_OPC_SHL3ADDX,
1857 BITFIELD(49, 2) /* index 162 */,
1858 TILEGX_OPC_ROTLI, TILEGX_OPC_SHLI, TILEGX_OPC_SHRSI, TILEGX_OPC_SHRUI,
1859};
1860
1861static const unsigned short decode_Y2_fsm[118] =
1862{
1863 BITFIELD(62, 2) /* index 0 */,
1864 TILEGX_OPC_NONE, CHILD(5), CHILD(66), CHILD(109),
1865 BITFIELD(55, 3) /* index 5 */,
1866 CHILD(14), CHILD(14), CHILD(14), CHILD(17), CHILD(40), CHILD(40), CHILD(40),
1867 CHILD(43),
1868 BITFIELD(26, 1) /* index 14 */,
1869 TILEGX_OPC_LD1S, TILEGX_OPC_LD1U,
1870 BITFIELD(26, 1) /* index 17 */,
1871 CHILD(20), CHILD(30),
1872 BITFIELD(51, 2) /* index 20 */,
1873 TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, CHILD(25),
1874 BITFIELD(53, 2) /* index 25 */,
1875 TILEGX_OPC_LD1S, TILEGX_OPC_LD1S, TILEGX_OPC_LD1S,
1876 TILEGX_OPC_PREFETCH_L1_FAULT,
1877 BITFIELD(51, 2) /* index 30 */,
1878 TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, CHILD(35),
1879 BITFIELD(53, 2) /* index 35 */,
1880 TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_LD1U, TILEGX_OPC_PREFETCH,
1881 BITFIELD(26, 1) /* index 40 */,
1882 TILEGX_OPC_LD2S, TILEGX_OPC_LD2U,
1883 BITFIELD(26, 1) /* index 43 */,
1884 CHILD(46), CHILD(56),
1885 BITFIELD(51, 2) /* index 46 */,
1886 TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, CHILD(51),
1887 BITFIELD(53, 2) /* index 51 */,
1888 TILEGX_OPC_LD2S, TILEGX_OPC_LD2S, TILEGX_OPC_LD2S,
1889 TILEGX_OPC_PREFETCH_L2_FAULT,
1890 BITFIELD(51, 2) /* index 56 */,
1891 TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, CHILD(61),
1892 BITFIELD(53, 2) /* index 61 */,
1893 TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_LD2U, TILEGX_OPC_PREFETCH_L2,
1894 BITFIELD(56, 2) /* index 66 */,
1895 CHILD(71), CHILD(74), CHILD(90), CHILD(93),
1896 BITFIELD(26, 1) /* index 71 */,
1897 TILEGX_OPC_NONE, TILEGX_OPC_LD4S,
1898 BITFIELD(26, 1) /* index 74 */,
1899 TILEGX_OPC_NONE, CHILD(77),
1900 BITFIELD(51, 2) /* index 77 */,
1901 TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(82),
1902 BITFIELD(53, 2) /* index 82 */,
1903 TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, TILEGX_OPC_LD4S, CHILD(87),
1904 BITFIELD(55, 1) /* index 87 */,
1905 TILEGX_OPC_LD4S, TILEGX_OPC_PREFETCH_L3_FAULT,
1906 BITFIELD(26, 1) /* index 90 */,
1907 TILEGX_OPC_LD4U, TILEGX_OPC_LD,
1908 BITFIELD(26, 1) /* index 93 */,
1909 CHILD(96), TILEGX_OPC_LD,
1910 BITFIELD(51, 2) /* index 96 */,
1911 TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(101),
1912 BITFIELD(53, 2) /* index 101 */,
1913 TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, TILEGX_OPC_LD4U, CHILD(106),
1914 BITFIELD(55, 1) /* index 106 */,
1915 TILEGX_OPC_LD4U, TILEGX_OPC_PREFETCH_L3,
1916 BITFIELD(26, 1) /* index 109 */,
1917 CHILD(112), CHILD(115),
1918 BITFIELD(57, 1) /* index 112 */,
1919 TILEGX_OPC_ST1, TILEGX_OPC_ST4,
1920 BITFIELD(57, 1) /* index 115 */,
1921 TILEGX_OPC_ST2, TILEGX_OPC_ST,
1922};
1923
1924#undef BITFIELD
1925#undef CHILD
1926const unsigned short * const
1927tilegx_bundle_decoder_fsms[TILEGX_NUM_PIPELINE_ENCODINGS] =
1928{
1929 decode_X0_fsm,
1930 decode_X1_fsm,
1931 decode_Y0_fsm,
1932 decode_Y1_fsm,
1933 decode_Y2_fsm
1934};
1935const struct tilegx_operand tilegx_operands[35] =
1936{
1937 {
1938 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_X0),
1939 8, 1, 0, 0, 0, 0,
1940 create_Imm8_X0, get_Imm8_X0
1941 },
1942 {
1943 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_X1),
1944 8, 1, 0, 0, 0, 0,
1945 create_Imm8_X1, get_Imm8_X1
1946 },
1947 {
1948 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_Y0),
1949 8, 1, 0, 0, 0, 0,
1950 create_Imm8_Y0, get_Imm8_Y0
1951 },
1952 {
1953 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM8_Y1),
1954 8, 1, 0, 0, 0, 0,
1955 create_Imm8_Y1, get_Imm8_Y1
1956 },
1957 {
1958 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM16_X0_HW0_LAST),
1959 16, 1, 0, 0, 0, 0,
1960 create_Imm16_X0, get_Imm16_X0
1961 },
1962 {
1963 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_IMM16_X1_HW0_LAST),
1964 16, 1, 0, 0, 0, 0,
1965 create_Imm16_X1, get_Imm16_X1
1966 },
1967 {
1968 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
1969 6, 0, 0, 1, 0, 0,
1970 create_Dest_X0, get_Dest_X0
1971 },
1972 {
1973 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
1974 6, 0, 1, 0, 0, 0,
1975 create_SrcA_X0, get_SrcA_X0
1976 },
1977 {
1978 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
1979 6, 0, 0, 1, 0, 0,
1980 create_Dest_X1, get_Dest_X1
1981 },
1982 {
1983 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
1984 6, 0, 1, 0, 0, 0,
1985 create_SrcA_X1, get_SrcA_X1
1986 },
1987 {
1988 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
1989 6, 0, 0, 1, 0, 0,
1990 create_Dest_Y0, get_Dest_Y0
1991 },
1992 {
1993 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
1994 6, 0, 1, 0, 0, 0,
1995 create_SrcA_Y0, get_SrcA_Y0
1996 },
1997 {
1998 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
1999 6, 0, 0, 1, 0, 0,
2000 create_Dest_Y1, get_Dest_Y1
2001 },
2002 {
2003 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2004 6, 0, 1, 0, 0, 0,
2005 create_SrcA_Y1, get_SrcA_Y1
2006 },
2007 {
2008 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2009 6, 0, 1, 0, 0, 0,
2010 create_SrcA_Y2, get_SrcA_Y2
2011 },
2012 {
2013 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2014 6, 0, 1, 1, 0, 0,
2015 create_SrcA_X1, get_SrcA_X1
2016 },
2017 {
2018 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2019 6, 0, 1, 0, 0, 0,
2020 create_SrcB_X0, get_SrcB_X0
2021 },
2022 {
2023 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2024 6, 0, 1, 0, 0, 0,
2025 create_SrcB_X1, get_SrcB_X1
2026 },
2027 {
2028 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2029 6, 0, 1, 0, 0, 0,
2030 create_SrcB_Y0, get_SrcB_Y0
2031 },
2032 {
2033 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2034 6, 0, 1, 0, 0, 0,
2035 create_SrcB_Y1, get_SrcB_Y1
2036 },
2037 {
2038 TILEGX_OP_TYPE_ADDRESS, BFD_RELOC(TILEGX_BROFF_X1),
2039 17, 1, 0, 0, 1, TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES,
2040 create_BrOff_X1, get_BrOff_X1
2041 },
2042 {
2043 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(NONE),
2044 6, 0, 0, 0, 0, 0,
2045 create_BFStart_X0, get_BFStart_X0
2046 },
2047 {
2048 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(NONE),
2049 6, 0, 0, 0, 0, 0,
2050 create_BFEnd_X0, get_BFEnd_X0
2051 },
2052 {
2053 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2054 6, 0, 1, 1, 0, 0,
2055 create_Dest_X0, get_Dest_X0
2056 },
2057 {
2058 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2059 6, 0, 1, 1, 0, 0,
2060 create_Dest_Y0, get_Dest_Y0
2061 },
2062 {
2063 TILEGX_OP_TYPE_ADDRESS, BFD_RELOC(TILEGX_JUMPOFF_X1),
2064 27, 1, 0, 0, 1, TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES,
2065 create_JumpOff_X1, get_JumpOff_X1
2066 },
2067 {
2068 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2069 6, 0, 0, 1, 0, 0,
2070 create_SrcBDest_Y2, get_SrcBDest_Y2
2071 },
2072 {
2073 TILEGX_OP_TYPE_SPR, BFD_RELOC(TILEGX_MF_IMM14_X1),
2074 14, 0, 0, 0, 0, 0,
2075 create_MF_Imm14_X1, get_MF_Imm14_X1
2076 },
2077 {
2078 TILEGX_OP_TYPE_SPR, BFD_RELOC(TILEGX_MT_IMM14_X1),
2079 14, 0, 0, 0, 0, 0,
2080 create_MT_Imm14_X1, get_MT_Imm14_X1
2081 },
2082 {
2083 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_X0),
2084 6, 0, 0, 0, 0, 0,
2085 create_ShAmt_X0, get_ShAmt_X0
2086 },
2087 {
2088 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_X1),
2089 6, 0, 0, 0, 0, 0,
2090 create_ShAmt_X1, get_ShAmt_X1
2091 },
2092 {
2093 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_Y0),
2094 6, 0, 0, 0, 0, 0,
2095 create_ShAmt_Y0, get_ShAmt_Y0
2096 },
2097 {
2098 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_SHAMT_Y1),
2099 6, 0, 0, 0, 0, 0,
2100 create_ShAmt_Y1, get_ShAmt_Y1
2101 },
2102 {
2103 TILEGX_OP_TYPE_REGISTER, BFD_RELOC(NONE),
2104 6, 0, 1, 0, 0, 0,
2105 create_SrcBDest_Y2, get_SrcBDest_Y2
2106 },
2107 {
2108 TILEGX_OP_TYPE_IMMEDIATE, BFD_RELOC(TILEGX_DEST_IMM8_X1),
2109 8, 1, 0, 0, 0, 0,
2110 create_Dest_Imm8_X1, get_Dest_Imm8_X1
2111 }
2112};
2113
2114
2115
2116
2117/* Given a set of bundle bits and the lookup FSM for a specific pipe,
2118 * returns which instruction the bundle contains in that pipe.
2119 */
2120static const struct tilegx_opcode *
2121find_opcode(tilegx_bundle_bits bits, const unsigned short *table)
2122{
2123 int index = 0;
2124
2125 while (1)
2126 {
2127 unsigned short bitspec = table[index];
2128 unsigned int bitfield =
2129 ((unsigned int)(bits >> (bitspec & 63))) & (bitspec >> 6);
2130
2131 unsigned short next = table[index + 1 + bitfield];
2132 if (next <= TILEGX_OPC_NONE)
2133 return &tilegx_opcodes[next];
2134
2135 index = next - TILEGX_OPC_NONE;
2136 }
2137}
2138
2139
2140int
2141parse_insn_tilegx(tilegx_bundle_bits bits,
2142 unsigned long long pc,
2143 struct tilegx_decoded_instruction
2144 decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE])
2145{
2146 int num_instructions = 0;
2147 int pipe;
2148
2149 int min_pipe, max_pipe;
2150 if ((bits & TILEGX_BUNDLE_MODE_MASK) == 0)
2151 {
2152 min_pipe = TILEGX_PIPELINE_X0;
2153 max_pipe = TILEGX_PIPELINE_X1;
2154 }
2155 else
2156 {
2157 min_pipe = TILEGX_PIPELINE_Y0;
2158 max_pipe = TILEGX_PIPELINE_Y2;
2159 }
2160
2161 /* For each pipe, find an instruction that fits. */
2162 for (pipe = min_pipe; pipe <= max_pipe; pipe++)
2163 {
2164 const struct tilegx_opcode *opc;
2165 struct tilegx_decoded_instruction *d;
2166 int i;
2167
2168 d = &decoded[num_instructions++];
2169 opc = find_opcode (bits, tilegx_bundle_decoder_fsms[pipe]);
2170 d->opcode = opc;
2171
2172 /* Decode each operand, sign extending, etc. as appropriate. */
2173 for (i = 0; i < opc->num_operands; i++)
2174 {
2175 const struct tilegx_operand *op =
2176 &tilegx_operands[opc->operands[pipe][i]];
2177 int raw_opval = op->extract (bits);
2178 long long opval;
2179
2180 if (op->is_signed)
2181 {
2182 /* Sign-extend the operand. */
2183 int shift = (int)((sizeof(int) * 8) - op->num_bits);
2184 raw_opval = (raw_opval << shift) >> shift;
2185 }
2186
2187 /* Adjust PC-relative scaled branch offsets. */
2188 if (op->type == TILEGX_OP_TYPE_ADDRESS)
2189 opval = (raw_opval * TILEGX_BUNDLE_SIZE_IN_BYTES) + pc;
2190 else
2191 opval = raw_opval;
2192
2193 /* Record the final value. */
2194 d->operands[i] = op;
2195 d->operand_values[i] = opval;
2196 }
2197 }
2198
2199 return num_instructions;
2200}
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 49a605be94c5..c4be58cc5d50 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -22,6 +22,7 @@
22#include <linux/sched.h> 22#include <linux/sched.h>
23#include <linux/smp.h> 23#include <linux/smp.h>
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/module.h>
25#include <asm/irq_regs.h> 26#include <asm/irq_regs.h>
26#include <asm/traps.h> 27#include <asm/traps.h>
27#include <hv/hypervisor.h> 28#include <hv/hypervisor.h>
@@ -56,6 +57,7 @@ cycles_t get_cycles(void)
56 57
57 return (((cycles_t)high) << 32) | low; 58 return (((cycles_t)high) << 32) | low;
58} 59}
60EXPORT_SYMBOL(get_cycles);
59#endif 61#endif
60 62
61/* 63/*
diff --git a/arch/tile/kernel/tlb.c b/arch/tile/kernel/tlb.c
index 2dffc1044d83..a5f241c24cac 100644
--- a/arch/tile/kernel/tlb.c
+++ b/arch/tile/kernel/tlb.c
@@ -34,13 +34,13 @@ void flush_tlb_mm(struct mm_struct *mm)
34{ 34{
35 HV_Remote_ASID asids[NR_CPUS]; 35 HV_Remote_ASID asids[NR_CPUS];
36 int i = 0, cpu; 36 int i = 0, cpu;
37 for_each_cpu(cpu, &mm->cpu_vm_mask) { 37 for_each_cpu(cpu, mm_cpumask(mm)) {
38 HV_Remote_ASID *asid = &asids[i++]; 38 HV_Remote_ASID *asid = &asids[i++];
39 asid->y = cpu / smp_topology.width; 39 asid->y = cpu / smp_topology.width;
40 asid->x = cpu % smp_topology.width; 40 asid->x = cpu % smp_topology.width;
41 asid->asid = per_cpu(current_asid, cpu); 41 asid->asid = per_cpu(current_asid, cpu);
42 } 42 }
43 flush_remote(0, HV_FLUSH_EVICT_L1I, &mm->cpu_vm_mask, 43 flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(mm),
44 0, 0, 0, NULL, asids, i); 44 0, 0, 0, NULL, asids, i);
45} 45}
46 46
@@ -54,8 +54,8 @@ void flush_tlb_page_mm(const struct vm_area_struct *vma, struct mm_struct *mm,
54{ 54{
55 unsigned long size = hv_page_size(vma); 55 unsigned long size = hv_page_size(vma);
56 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; 56 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
57 flush_remote(0, cache, &mm->cpu_vm_mask, 57 flush_remote(0, cache, mm_cpumask(mm),
58 va, size, size, &mm->cpu_vm_mask, NULL, 0); 58 va, size, size, mm_cpumask(mm), NULL, 0);
59} 59}
60 60
61void flush_tlb_page(const struct vm_area_struct *vma, unsigned long va) 61void flush_tlb_page(const struct vm_area_struct *vma, unsigned long va)
@@ -70,8 +70,8 @@ void flush_tlb_range(const struct vm_area_struct *vma,
70 unsigned long size = hv_page_size(vma); 70 unsigned long size = hv_page_size(vma);
71 struct mm_struct *mm = vma->vm_mm; 71 struct mm_struct *mm = vma->vm_mm;
72 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; 72 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
73 flush_remote(0, cache, &mm->cpu_vm_mask, start, end - start, size, 73 flush_remote(0, cache, mm_cpumask(mm), start, end - start, size,
74 &mm->cpu_vm_mask, NULL, 0); 74 mm_cpumask(mm), NULL, 0);
75} 75}
76 76
77void flush_tlb_all(void) 77void flush_tlb_all(void)
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 5474fc2e77e8..f9803dfa7357 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -308,6 +308,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
308 info.si_addr = (void __user *)address; 308 info.si_addr = (void __user *)address;
309 if (signo == SIGILL) 309 if (signo == SIGILL)
310 info.si_trapno = fault_num; 310 info.si_trapno = fault_num;
311 trace_unhandled_signal("trap", regs, address, signo);
311 force_sig_info(signo, &info, current); 312 force_sig_info(signo, &info, current);
312} 313}
313 314
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 82f64cc63658..24448734f6f1 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -59,7 +59,7 @@
59 * bad kernel addresses). 59 * bad kernel addresses).
60 * 60 *
61 * Note that if the value we would store is the same as what we 61 * Note that if the value we would store is the same as what we
62 * loaded, we bypass the load. Other platforms with true atomics can 62 * loaded, we bypass the store. Other platforms with true atomics can
63 * make the guarantee that a non-atomic __clear_bit(), for example, 63 * make the guarantee that a non-atomic __clear_bit(), for example,
64 * can safely race with an atomic test_and_set_bit(); this example is 64 * can safely race with an atomic test_and_set_bit(); this example is
65 * from bit_spinlock.h in slub_lock() / slub_unlock(). We can't do 65 * from bit_spinlock.h in slub_lock() / slub_unlock(). We can't do
diff --git a/arch/tile/lib/cacheflush.c b/arch/tile/lib/cacheflush.c
index 35c1d8ca5f38..8928aace7a64 100644
--- a/arch/tile/lib/cacheflush.c
+++ b/arch/tile/lib/cacheflush.c
@@ -15,6 +15,7 @@
15#include <asm/page.h> 15#include <asm/page.h>
16#include <asm/cacheflush.h> 16#include <asm/cacheflush.h>
17#include <arch/icache.h> 17#include <arch/icache.h>
18#include <arch/spr_def.h>
18 19
19 20
20void __flush_icache_range(unsigned long start, unsigned long end) 21void __flush_icache_range(unsigned long start, unsigned long end)
@@ -39,6 +40,18 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
39 char *p, *base; 40 char *p, *base;
40 size_t step_size, load_count; 41 size_t step_size, load_count;
41 const unsigned long STRIPE_WIDTH = 8192; 42 const unsigned long STRIPE_WIDTH = 8192;
43#ifdef __tilegx__
44 /*
45 * On TILE-Gx, we must disable the dstream prefetcher before doing
46 * a cache flush; otherwise, we could end up with data in the cache
47 * that we don't want there. Note that normally we'd do an mf
48 * after the SPR write to disabling the prefetcher, but we do one
49 * below, before any further loads, so there's no need to do it
50 * here.
51 */
52 uint_reg_t old_dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
53 __insn_mtspr(SPR_DSTREAM_PF, 0);
54#endif
42 55
43 /* 56 /*
44 * Flush and invalidate the buffer out of the local L1/L2 57 * Flush and invalidate the buffer out of the local L1/L2
@@ -122,4 +135,9 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
122 135
123 /* Wait for the load+inv's (and thus finvs) to have completed. */ 136 /* Wait for the load+inv's (and thus finvs) to have completed. */
124 __insn_mf(); 137 __insn_mf();
138
139#ifdef __tilegx__
140 /* Reenable the prefetcher. */
141 __insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf);
142#endif
125} 143}
diff --git a/arch/tile/lib/memchr_64.c b/arch/tile/lib/memchr_64.c
new file mode 100644
index 000000000000..84fdc8d8e735
--- /dev/null
+++ b/arch/tile/lib/memchr_64.c
@@ -0,0 +1,71 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/types.h>
16#include <linux/string.h>
17#include <linux/module.h>
18
19void *memchr(const void *s, int c, size_t n)
20{
21 const uint64_t *last_word_ptr;
22 const uint64_t *p;
23 const char *last_byte_ptr;
24 uintptr_t s_int;
25 uint64_t goal, before_mask, v, bits;
26 char *ret;
27
28 if (__builtin_expect(n == 0, 0)) {
29 /* Don't dereference any memory if the array is empty. */
30 return NULL;
31 }
32
33 /* Get an aligned pointer. */
34 s_int = (uintptr_t) s;
35 p = (const uint64_t *)(s_int & -8);
36
37 /* Create eight copies of the byte for which we are looking. */
38 goal = 0x0101010101010101ULL * (uint8_t) c;
39
40 /* Read the first word, but munge it so that bytes before the array
41 * will not match goal.
42 *
43 * Note that this shift count expression works because we know
44 * shift counts are taken mod 64.
45 */
46 before_mask = (1ULL << (s_int << 3)) - 1;
47 v = (*p | before_mask) ^ (goal & before_mask);
48
49 /* Compute the address of the last byte. */
50 last_byte_ptr = (const char *)s + n - 1;
51
52 /* Compute the address of the word containing the last byte. */
53 last_word_ptr = (const uint64_t *)((uintptr_t) last_byte_ptr & -8);
54
55 while ((bits = __insn_v1cmpeq(v, goal)) == 0) {
56 if (__builtin_expect(p == last_word_ptr, 0)) {
57 /* We already read the last word in the array,
58 * so give up.
59 */
60 return NULL;
61 }
62 v = *++p;
63 }
64
65 /* We found a match, but it might be in a byte past the end
66 * of the array.
67 */
68 ret = ((char *)p) + (__insn_ctz(bits) >> 3);
69 return (ret <= last_byte_ptr) ? ret : NULL;
70}
71EXPORT_SYMBOL(memchr);
diff --git a/arch/tile/lib/memcpy_64.c b/arch/tile/lib/memcpy_64.c
new file mode 100644
index 000000000000..3fab9a6a2bbe
--- /dev/null
+++ b/arch/tile/lib/memcpy_64.c
@@ -0,0 +1,220 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/types.h>
16#include <linux/string.h>
17#include <linux/module.h>
18#define __memcpy memcpy
19/* EXPORT_SYMBOL() is in arch/tile/lib/exports.c since this should be asm. */
20
21/* Must be 8 bytes in size. */
22#define word_t uint64_t
23
24#if CHIP_L2_LINE_SIZE() != 64 && CHIP_L2_LINE_SIZE() != 128
25#error "Assumes 64 or 128 byte line size"
26#endif
27
28/* How many cache lines ahead should we prefetch? */
29#define PREFETCH_LINES_AHEAD 3
30
31/*
32 * Provide "base versions" of load and store for the normal code path.
33 * The kernel provides other versions for userspace copies.
34 */
35#define ST(p, v) (*(p) = (v))
36#define LD(p) (*(p))
37
38#ifndef USERCOPY_FUNC
39#define ST1 ST
40#define ST2 ST
41#define ST4 ST
42#define ST8 ST
43#define LD1 LD
44#define LD2 LD
45#define LD4 LD
46#define LD8 LD
47#define RETVAL dstv
48void *memcpy(void *__restrict dstv, const void *__restrict srcv, size_t n)
49#else
50/*
51 * Special kernel version will provide implementation of the LDn/STn
52 * macros to return a count of uncopied bytes due to mm fault.
53 */
54#define RETVAL 0
55int USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n)
56#endif
57{
58 char *__restrict dst1 = (char *)dstv;
59 const char *__restrict src1 = (const char *)srcv;
60 const char *__restrict src1_end;
61 const char *__restrict prefetch;
62 word_t *__restrict dst8; /* 8-byte pointer to destination memory. */
63 word_t final; /* Final bytes to write to trailing word, if any */
64 long i;
65
66 if (n < 16) {
67 for (; n; n--)
68 ST1(dst1++, LD1(src1++));
69 return RETVAL;
70 }
71
72 /*
73 * Locate the end of source memory we will copy. Don't
74 * prefetch past this.
75 */
76 src1_end = src1 + n - 1;
77
78 /* Prefetch ahead a few cache lines, but not past the end. */
79 prefetch = src1;
80 for (i = 0; i < PREFETCH_LINES_AHEAD; i++) {
81 __insn_prefetch(prefetch);
82 prefetch += CHIP_L2_LINE_SIZE();
83 prefetch = (prefetch > src1_end) ? prefetch : src1;
84 }
85
86 /* Copy bytes until dst is word-aligned. */
87 for (; (uintptr_t)dst1 & (sizeof(word_t) - 1); n--)
88 ST1(dst1++, LD1(src1++));
89
90 /* 8-byte pointer to destination memory. */
91 dst8 = (word_t *)dst1;
92
93 if (__builtin_expect((uintptr_t)src1 & (sizeof(word_t) - 1), 0)) {
94 /*
95 * Misaligned copy. Copy 8 bytes at a time, but don't
96 * bother with other fanciness.
97 *
98 * TODO: Consider prefetching and using wh64 as well.
99 */
100
101 /* Create an aligned src8. */
102 const word_t *__restrict src8 =
103 (const word_t *)((uintptr_t)src1 & -sizeof(word_t));
104 word_t b;
105
106 word_t a = LD8(src8++);
107 for (; n >= sizeof(word_t); n -= sizeof(word_t)) {
108 b = LD8(src8++);
109 a = __insn_dblalign(a, b, src1);
110 ST8(dst8++, a);
111 a = b;
112 }
113
114 if (n == 0)
115 return RETVAL;
116
117 b = ((const char *)src8 <= src1_end) ? *src8 : 0;
118
119 /*
120 * Final source bytes to write to trailing partial
121 * word, if any.
122 */
123 final = __insn_dblalign(a, b, src1);
124 } else {
125 /* Aligned copy. */
126
127 const word_t* __restrict src8 = (const word_t *)src1;
128
129 /* src8 and dst8 are both word-aligned. */
130 if (n >= CHIP_L2_LINE_SIZE()) {
131 /* Copy until 'dst' is cache-line-aligned. */
132 for (; (uintptr_t)dst8 & (CHIP_L2_LINE_SIZE() - 1);
133 n -= sizeof(word_t))
134 ST8(dst8++, LD8(src8++));
135
136 for (; n >= CHIP_L2_LINE_SIZE(); ) {
137 __insn_wh64(dst8);
138
139 /*
140 * Prefetch and advance to next line
141 * to prefetch, but don't go past the end
142 */
143 __insn_prefetch(prefetch);
144 prefetch += CHIP_L2_LINE_SIZE();
145 prefetch = (prefetch > src1_end) ? prefetch :
146 (const char *)src8;
147
148 /*
149 * Copy an entire cache line. Manually
150 * unrolled to avoid idiosyncracies of
151 * compiler unrolling.
152 */
153#define COPY_WORD(offset) ({ ST8(dst8+offset, LD8(src8+offset)); n -= 8; })
154 COPY_WORD(0);
155 COPY_WORD(1);
156 COPY_WORD(2);
157 COPY_WORD(3);
158 COPY_WORD(4);
159 COPY_WORD(5);
160 COPY_WORD(6);
161 COPY_WORD(7);
162#if CHIP_L2_LINE_SIZE() == 128
163 COPY_WORD(8);
164 COPY_WORD(9);
165 COPY_WORD(10);
166 COPY_WORD(11);
167 COPY_WORD(12);
168 COPY_WORD(13);
169 COPY_WORD(14);
170 COPY_WORD(15);
171#elif CHIP_L2_LINE_SIZE() != 64
172# error Fix code that assumes particular L2 cache line sizes
173#endif
174
175 dst8 += CHIP_L2_LINE_SIZE() / sizeof(word_t);
176 src8 += CHIP_L2_LINE_SIZE() / sizeof(word_t);
177 }
178 }
179
180 for (; n >= sizeof(word_t); n -= sizeof(word_t))
181 ST8(dst8++, LD8(src8++));
182
183 if (__builtin_expect(n == 0, 1))
184 return RETVAL;
185
186 final = LD8(src8);
187 }
188
189 /* n != 0 if we get here. Write out any trailing bytes. */
190 dst1 = (char *)dst8;
191 if (n & 4) {
192 ST4((uint32_t *)dst1, final);
193 dst1 += 4;
194 final >>= 32;
195 n &= 3;
196 }
197 if (n & 2) {
198 ST2((uint16_t *)dst1, final);
199 dst1 += 2;
200 final >>= 16;
201 n &= 1;
202 }
203 if (n)
204 ST1((uint8_t *)dst1, final);
205
206 return RETVAL;
207}
208
209
210#ifdef USERCOPY_FUNC
211#undef ST1
212#undef ST2
213#undef ST4
214#undef ST8
215#undef LD1
216#undef LD2
217#undef LD4
218#undef LD8
219#undef USERCOPY_FUNC
220#endif
diff --git a/arch/tile/lib/memcpy_user_64.c b/arch/tile/lib/memcpy_user_64.c
new file mode 100644
index 000000000000..4763b3aff1cc
--- /dev/null
+++ b/arch/tile/lib/memcpy_user_64.c
@@ -0,0 +1,86 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * Do memcpy(), but trap and return "n" when a load or store faults.
15 *
16 * Note: this idiom only works when memcpy() compiles to a leaf function.
17 * If "sp" is updated during memcpy, the "jrp lr" will be incorrect.
18 *
19 * Also note that we are capturing "n" from the containing scope here.
20 */
21
22#define _ST(p, inst, v) \
23 ({ \
24 asm("1: " #inst " %0, %1;" \
25 ".pushsection .coldtext.memcpy,\"ax\";" \
26 "2: { move r0, %2; jrp lr };" \
27 ".section __ex_table,\"a\";" \
28 ".quad 1b, 2b;" \
29 ".popsection" \
30 : "=m" (*(p)) : "r" (v), "r" (n)); \
31 })
32
33#define _LD(p, inst) \
34 ({ \
35 unsigned long __v; \
36 asm("1: " #inst " %0, %1;" \
37 ".pushsection .coldtext.memcpy,\"ax\";" \
38 "2: { move r0, %2; jrp lr };" \
39 ".section __ex_table,\"a\";" \
40 ".quad 1b, 2b;" \
41 ".popsection" \
42 : "=r" (__v) : "m" (*(p)), "r" (n)); \
43 __v; \
44 })
45
46#define USERCOPY_FUNC __copy_to_user_inatomic
47#define ST1(p, v) _ST((p), st1, (v))
48#define ST2(p, v) _ST((p), st2, (v))
49#define ST4(p, v) _ST((p), st4, (v))
50#define ST8(p, v) _ST((p), st, (v))
51#define LD1 LD
52#define LD2 LD
53#define LD4 LD
54#define LD8 LD
55#include "memcpy_64.c"
56
57#define USERCOPY_FUNC __copy_from_user_inatomic
58#define ST1 ST
59#define ST2 ST
60#define ST4 ST
61#define ST8 ST
62#define LD1(p) _LD((p), ld1u)
63#define LD2(p) _LD((p), ld2u)
64#define LD4(p) _LD((p), ld4u)
65#define LD8(p) _LD((p), ld)
66#include "memcpy_64.c"
67
68#define USERCOPY_FUNC __copy_in_user_inatomic
69#define ST1(p, v) _ST((p), st1, (v))
70#define ST2(p, v) _ST((p), st2, (v))
71#define ST4(p, v) _ST((p), st4, (v))
72#define ST8(p, v) _ST((p), st, (v))
73#define LD1(p) _LD((p), ld1u)
74#define LD2(p) _LD((p), ld2u)
75#define LD4(p) _LD((p), ld4u)
76#define LD8(p) _LD((p), ld)
77#include "memcpy_64.c"
78
79unsigned long __copy_from_user_zeroing(void *to, const void __user *from,
80 unsigned long n)
81{
82 unsigned long rc = __copy_from_user_inatomic(to, from, n);
83 if (unlikely(rc))
84 memset(to + n - rc, 0, rc);
85 return rc;
86}
diff --git a/arch/tile/lib/memset_64.c b/arch/tile/lib/memset_64.c
new file mode 100644
index 000000000000..3873085711d5
--- /dev/null
+++ b/arch/tile/lib/memset_64.c
@@ -0,0 +1,145 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <arch/chip.h>
16
17#include <linux/types.h>
18#include <linux/string.h>
19#include <linux/module.h>
20
21#undef memset
22
23void *memset(void *s, int c, size_t n)
24{
25 uint64_t *out64;
26 int n64, to_align64;
27 uint64_t v64;
28 uint8_t *out8 = s;
29
30 /* Experimentation shows that a trivial tight loop is a win up until
31 * around a size of 20, where writing a word at a time starts to win.
32 */
33#define BYTE_CUTOFF 20
34
35#if BYTE_CUTOFF < 7
36 /* This must be at least at least this big, or some code later
37 * on doesn't work.
38 */
39#error "BYTE_CUTOFF is too small"
40#endif
41
42 if (n < BYTE_CUTOFF) {
43 /* Strangely, this turns out to be the tightest way to
44 * write this loop.
45 */
46 if (n != 0) {
47 do {
48 /* Strangely, combining these into one line
49 * performs worse.
50 */
51 *out8 = c;
52 out8++;
53 } while (--n != 0);
54 }
55
56 return s;
57 }
58
59 /* Align 'out8'. We know n >= 7 so this won't write past the end. */
60 while (((uintptr_t) out8 & 7) != 0) {
61 *out8++ = c;
62 --n;
63 }
64
65 /* Align 'n'. */
66 while (n & 7)
67 out8[--n] = c;
68
69 out64 = (uint64_t *) out8;
70 n64 = n >> 3;
71
72 /* Tile input byte out to 64 bits. */
73 /* KLUDGE */
74 v64 = 0x0101010101010101ULL * (uint8_t)c;
75
76 /* This must be at least 8 or the following loop doesn't work. */
77#define CACHE_LINE_SIZE_IN_DOUBLEWORDS (CHIP_L2_LINE_SIZE() / 8)
78
79 /* Determine how many words we need to emit before the 'out32'
80 * pointer becomes aligned modulo the cache line size.
81 */
82 to_align64 = (-((uintptr_t)out64 >> 3)) &
83 (CACHE_LINE_SIZE_IN_DOUBLEWORDS - 1);
84
85 /* Only bother aligning and using wh64 if there is at least
86 * one full cache line to process. This check also prevents
87 * overrunning the end of the buffer with alignment words.
88 */
89 if (to_align64 <= n64 - CACHE_LINE_SIZE_IN_DOUBLEWORDS) {
90 int lines_left;
91
92 /* Align out64 mod the cache line size so we can use wh64. */
93 n64 -= to_align64;
94 for (; to_align64 != 0; to_align64--) {
95 *out64 = v64;
96 out64++;
97 }
98
99 /* Use unsigned divide to turn this into a right shift. */
100 lines_left = (unsigned)n64 / CACHE_LINE_SIZE_IN_DOUBLEWORDS;
101
102 do {
103 /* Only wh64 a few lines at a time, so we don't
104 * exceed the maximum number of victim lines.
105 */
106 int x = ((lines_left < CHIP_MAX_OUTSTANDING_VICTIMS())
107 ? lines_left
108 : CHIP_MAX_OUTSTANDING_VICTIMS());
109 uint64_t *wh = out64;
110 int i = x;
111 int j;
112
113 lines_left -= x;
114
115 do {
116 __insn_wh64(wh);
117 wh += CACHE_LINE_SIZE_IN_DOUBLEWORDS;
118 } while (--i);
119
120 for (j = x * (CACHE_LINE_SIZE_IN_DOUBLEWORDS / 4);
121 j != 0; j--) {
122 *out64++ = v64;
123 *out64++ = v64;
124 *out64++ = v64;
125 *out64++ = v64;
126 }
127 } while (lines_left != 0);
128
129 /* We processed all full lines above, so only this many
130 * words remain to be processed.
131 */
132 n64 &= CACHE_LINE_SIZE_IN_DOUBLEWORDS - 1;
133 }
134
135 /* Now handle any leftover values. */
136 if (n64 != 0) {
137 do {
138 *out64 = v64;
139 out64++;
140 } while (--n64 != 0);
141 }
142
143 return s;
144}
145EXPORT_SYMBOL(memset);
diff --git a/arch/tile/lib/spinlock_64.c b/arch/tile/lib/spinlock_64.c
new file mode 100644
index 000000000000..d6fb9581e980
--- /dev/null
+++ b/arch/tile/lib/spinlock_64.c
@@ -0,0 +1,104 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/spinlock.h>
16#include <linux/module.h>
17#include <asm/processor.h>
18
19#include "spinlock_common.h"
20
21/*
22 * Read the spinlock value without allocating in our cache and without
23 * causing an invalidation to another cpu with a copy of the cacheline.
24 * This is important when we are spinning waiting for the lock.
25 */
26static inline u32 arch_spin_read_noalloc(void *lock)
27{
28 return atomic_cmpxchg((atomic_t *)lock, -1, -1);
29}
30
31/*
32 * Wait until the high bits (current) match my ticket.
33 * If we notice the overflow bit set on entry, we clear it.
34 */
35void arch_spin_lock_slow(arch_spinlock_t *lock, u32 my_ticket)
36{
37 if (unlikely(my_ticket & __ARCH_SPIN_NEXT_OVERFLOW)) {
38 __insn_fetchand4(&lock->lock, ~__ARCH_SPIN_NEXT_OVERFLOW);
39 my_ticket &= ~__ARCH_SPIN_NEXT_OVERFLOW;
40 }
41
42 for (;;) {
43 u32 val = arch_spin_read_noalloc(lock);
44 u32 delta = my_ticket - arch_spin_current(val);
45 if (delta == 0)
46 return;
47 relax((128 / CYCLES_PER_RELAX_LOOP) * delta);
48 }
49}
50EXPORT_SYMBOL(arch_spin_lock_slow);
51
52/*
53 * Check the lock to see if it is plausible, and try to get it with cmpxchg().
54 */
55int arch_spin_trylock(arch_spinlock_t *lock)
56{
57 u32 val = arch_spin_read_noalloc(lock);
58 if (unlikely(arch_spin_current(val) != arch_spin_next(val)))
59 return 0;
60 return cmpxchg(&lock->lock, val, (val + 1) & ~__ARCH_SPIN_NEXT_OVERFLOW)
61 == val;
62}
63EXPORT_SYMBOL(arch_spin_trylock);
64
65void arch_spin_unlock_wait(arch_spinlock_t *lock)
66{
67 u32 iterations = 0;
68 while (arch_spin_is_locked(lock))
69 delay_backoff(iterations++);
70}
71EXPORT_SYMBOL(arch_spin_unlock_wait);
72
73/*
74 * If the read lock fails due to a writer, we retry periodically
75 * until the value is positive and we write our incremented reader count.
76 */
77void __read_lock_failed(arch_rwlock_t *rw)
78{
79 u32 val;
80 int iterations = 0;
81 do {
82 delay_backoff(iterations++);
83 val = __insn_fetchaddgez4(&rw->lock, 1);
84 } while (unlikely(arch_write_val_locked(val)));
85}
86EXPORT_SYMBOL(__read_lock_failed);
87
88/*
89 * If we failed because there were readers, clear the "writer" bit
90 * so we don't block additional readers. Otherwise, there was another
91 * writer anyway, so our "fetchor" made no difference. Then wait,
92 * issuing periodic fetchor instructions, till we get the lock.
93 */
94void __write_lock_failed(arch_rwlock_t *rw, u32 val)
95{
96 int iterations = 0;
97 do {
98 if (!arch_write_val_locked(val))
99 val = __insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT);
100 delay_backoff(iterations++);
101 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
102 } while (val != 0);
103}
104EXPORT_SYMBOL(__write_lock_failed);
diff --git a/arch/tile/lib/strchr_64.c b/arch/tile/lib/strchr_64.c
new file mode 100644
index 000000000000..617a9273aaa8
--- /dev/null
+++ b/arch/tile/lib/strchr_64.c
@@ -0,0 +1,67 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/types.h>
16#include <linux/string.h>
17#include <linux/module.h>
18
19#undef strchr
20
21char *strchr(const char *s, int c)
22{
23 int z, g;
24
25 /* Get an aligned pointer. */
26 const uintptr_t s_int = (uintptr_t) s;
27 const uint64_t *p = (const uint64_t *)(s_int & -8);
28
29 /* Create eight copies of the byte for which we are looking. */
30 const uint64_t goal = 0x0101010101010101ULL * (uint8_t) c;
31
32 /* Read the first aligned word, but force bytes before the string to
33 * match neither zero nor goal (we make sure the high bit of each
34 * byte is 1, and the low 7 bits are all the opposite of the goal
35 * byte).
36 *
37 * Note that this shift count expression works because we know shift
38 * counts are taken mod 64.
39 */
40 const uint64_t before_mask = (1ULL << (s_int << 3)) - 1;
41 uint64_t v = (*p | before_mask) ^
42 (goal & __insn_v1shrsi(before_mask, 1));
43
44 uint64_t zero_matches, goal_matches;
45 while (1) {
46 /* Look for a terminating '\0'. */
47 zero_matches = __insn_v1cmpeqi(v, 0);
48
49 /* Look for the goal byte. */
50 goal_matches = __insn_v1cmpeq(v, goal);
51
52 if (__builtin_expect((zero_matches | goal_matches) != 0, 0))
53 break;
54
55 v = *++p;
56 }
57
58 z = __insn_ctz(zero_matches);
59 g = __insn_ctz(goal_matches);
60
61 /* If we found c before '\0' we got a match. Note that if c == '\0'
62 * then g == z, and we correctly return the address of the '\0'
63 * rather than NULL.
64 */
65 return (g <= z) ? ((char *)p) + (g >> 3) : NULL;
66}
67EXPORT_SYMBOL(strchr);
diff --git a/arch/tile/lib/strlen_64.c b/arch/tile/lib/strlen_64.c
new file mode 100644
index 000000000000..1c92d46202a8
--- /dev/null
+++ b/arch/tile/lib/strlen_64.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/types.h>
16#include <linux/string.h>
17#include <linux/module.h>
18
19#undef strlen
20
21size_t strlen(const char *s)
22{
23 /* Get an aligned pointer. */
24 const uintptr_t s_int = (uintptr_t) s;
25 const uint64_t *p = (const uint64_t *)(s_int & -8);
26
27 /* Read the first word, but force bytes before the string to be nonzero.
28 * This expression works because we know shift counts are taken mod 64.
29 */
30 uint64_t v = *p | ((1ULL << (s_int << 3)) - 1);
31
32 uint64_t bits;
33 while ((bits = __insn_v1cmpeqi(v, 0)) == 0)
34 v = *++p;
35
36 return ((const char *)p) + (__insn_ctz(bits) >> 3) - s;
37}
38EXPORT_SYMBOL(strlen);
diff --git a/arch/tile/lib/usercopy_64.S b/arch/tile/lib/usercopy_64.S
new file mode 100644
index 000000000000..2ff44f87b78e
--- /dev/null
+++ b/arch/tile/lib/usercopy_64.S
@@ -0,0 +1,196 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/linkage.h>
16#include <asm/errno.h>
17#include <asm/cache.h>
18#include <arch/chip.h>
19
20/* Access user memory, but use MMU to avoid propagating kernel exceptions. */
21
22 .pushsection .fixup,"ax"
23
24get_user_fault:
25 { movei r1, -EFAULT; move r0, zero }
26 jrp lr
27 ENDPROC(get_user_fault)
28
29put_user_fault:
30 { movei r0, -EFAULT; jrp lr }
31 ENDPROC(put_user_fault)
32
33 .popsection
34
35/*
36 * __get_user_N functions take a pointer in r0, and return 0 in r1
37 * on success, with the value in r0; or else -EFAULT in r1.
38 */
39#define __get_user_N(bytes, LOAD) \
40 STD_ENTRY(__get_user_##bytes); \
411: { LOAD r0, r0; move r1, zero }; \
42 jrp lr; \
43 STD_ENDPROC(__get_user_##bytes); \
44 .pushsection __ex_table,"a"; \
45 .quad 1b, get_user_fault; \
46 .popsection
47
48__get_user_N(1, ld1u)
49__get_user_N(2, ld2u)
50__get_user_N(4, ld4u)
51__get_user_N(8, ld)
52
53/*
54 * __put_user_N functions take a value in r0 and a pointer in r1,
55 * and return 0 in r0 on success or -EFAULT on failure.
56 */
57#define __put_user_N(bytes, STORE) \
58 STD_ENTRY(__put_user_##bytes); \
591: { STORE r1, r0; move r0, zero }; \
60 jrp lr; \
61 STD_ENDPROC(__put_user_##bytes); \
62 .pushsection __ex_table,"a"; \
63 .quad 1b, put_user_fault; \
64 .popsection
65
66__put_user_N(1, st1)
67__put_user_N(2, st2)
68__put_user_N(4, st4)
69__put_user_N(8, st)
70
71/*
72 * strnlen_user_asm takes the pointer in r0, and the length bound in r1.
73 * It returns the length, including the terminating NUL, or zero on exception.
74 * If length is greater than the bound, returns one plus the bound.
75 */
76STD_ENTRY(strnlen_user_asm)
77 { beqz r1, 2f; addi r3, r0, -1 } /* bias down to include NUL */
781: { ld1u r4, r0; addi r1, r1, -1 }
79 beqz r4, 2f
80 { bnezt r1, 1b; addi r0, r0, 1 }
812: { sub r0, r0, r3; jrp lr }
82 STD_ENDPROC(strnlen_user_asm)
83 .pushsection .fixup,"ax"
84strnlen_user_fault:
85 { move r0, zero; jrp lr }
86 ENDPROC(strnlen_user_fault)
87 .section __ex_table,"a"
88 .quad 1b, strnlen_user_fault
89 .popsection
90
91/*
92 * strncpy_from_user_asm takes the kernel target pointer in r0,
93 * the userspace source pointer in r1, and the length bound (including
94 * the trailing NUL) in r2. On success, it returns the string length
95 * (not including the trailing NUL), or -EFAULT on failure.
96 */
97STD_ENTRY(strncpy_from_user_asm)
98 { beqz r2, 2f; move r3, r0 }
991: { ld1u r4, r1; addi r1, r1, 1; addi r2, r2, -1 }
100 { st1 r0, r4; addi r0, r0, 1 }
101 beqz r2, 2f
102 bnezt r4, 1b
103 addi r0, r0, -1 /* don't count the trailing NUL */
1042: { sub r0, r0, r3; jrp lr }
105 STD_ENDPROC(strncpy_from_user_asm)
106 .pushsection .fixup,"ax"
107strncpy_from_user_fault:
108 { movei r0, -EFAULT; jrp lr }
109 ENDPROC(strncpy_from_user_fault)
110 .section __ex_table,"a"
111 .quad 1b, strncpy_from_user_fault
112 .popsection
113
114/*
115 * clear_user_asm takes the user target address in r0 and the
116 * number of bytes to zero in r1.
117 * It returns the number of uncopiable bytes (hopefully zero) in r0.
118 * Note that we don't use a separate .fixup section here since we fall
119 * through into the "fixup" code as the last straight-line bundle anyway.
120 */
121STD_ENTRY(clear_user_asm)
122 { beqz r1, 2f; or r2, r0, r1 }
123 andi r2, r2, 7
124 beqzt r2, .Lclear_aligned_user_asm
1251: { st1 r0, zero; addi r0, r0, 1; addi r1, r1, -1 }
126 bnezt r1, 1b
1272: { move r0, r1; jrp lr }
128 .pushsection __ex_table,"a"
129 .quad 1b, 2b
130 .popsection
131
132.Lclear_aligned_user_asm:
1331: { st r0, zero; addi r0, r0, 8; addi r1, r1, -8 }
134 bnezt r1, 1b
1352: { move r0, r1; jrp lr }
136 STD_ENDPROC(clear_user_asm)
137 .pushsection __ex_table,"a"
138 .quad 1b, 2b
139 .popsection
140
141/*
142 * flush_user_asm takes the user target address in r0 and the
143 * number of bytes to flush in r1.
144 * It returns the number of unflushable bytes (hopefully zero) in r0.
145 */
146STD_ENTRY(flush_user_asm)
147 beqz r1, 2f
148 { movei r2, L2_CACHE_BYTES; add r1, r0, r1 }
149 { sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }
150 { and r0, r0, r2; and r1, r1, r2 }
151 { sub r1, r1, r0 }
1521: { flush r0; addi r1, r1, -CHIP_FLUSH_STRIDE() }
153 { addi r0, r0, CHIP_FLUSH_STRIDE(); bnezt r1, 1b }
1542: { move r0, r1; jrp lr }
155 STD_ENDPROC(flush_user_asm)
156 .pushsection __ex_table,"a"
157 .quad 1b, 2b
158 .popsection
159
160/*
161 * inv_user_asm takes the user target address in r0 and the
162 * number of bytes to invalidate in r1.
163 * It returns the number of not inv'able bytes (hopefully zero) in r0.
164 */
165STD_ENTRY(inv_user_asm)
166 beqz r1, 2f
167 { movei r2, L2_CACHE_BYTES; add r1, r0, r1 }
168 { sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }
169 { and r0, r0, r2; and r1, r1, r2 }
170 { sub r1, r1, r0 }
1711: { inv r0; addi r1, r1, -CHIP_INV_STRIDE() }
172 { addi r0, r0, CHIP_INV_STRIDE(); bnezt r1, 1b }
1732: { move r0, r1; jrp lr }
174 STD_ENDPROC(inv_user_asm)
175 .pushsection __ex_table,"a"
176 .quad 1b, 2b
177 .popsection
178
179/*
180 * finv_user_asm takes the user target address in r0 and the
181 * number of bytes to flush-invalidate in r1.
182 * It returns the number of not finv'able bytes (hopefully zero) in r0.
183 */
184STD_ENTRY(finv_user_asm)
185 beqz r1, 2f
186 { movei r2, L2_CACHE_BYTES; add r1, r0, r1 }
187 { sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }
188 { and r0, r0, r2; and r1, r1, r2 }
189 { sub r1, r1, r0 }
1901: { finv r0; addi r1, r1, -CHIP_FINV_STRIDE() }
191 { addi r0, r0, CHIP_FINV_STRIDE(); bnezt r1, 1b }
1922: { move r0, r1; jrp lr }
193 STD_ENDPROC(finv_user_asm)
194 .pushsection __ex_table,"a"
195 .quad 1b, 2b
196 .popsection
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 51f8663bf074..25b7b90fd620 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -43,8 +43,11 @@
43 43
44#include <arch/interrupts.h> 44#include <arch/interrupts.h>
45 45
46static noinline void force_sig_info_fault(int si_signo, int si_code, 46static noinline void force_sig_info_fault(const char *type, int si_signo,
47 unsigned long address, int fault_num, struct task_struct *tsk) 47 int si_code, unsigned long address,
48 int fault_num,
49 struct task_struct *tsk,
50 struct pt_regs *regs)
48{ 51{
49 siginfo_t info; 52 siginfo_t info;
50 53
@@ -59,6 +62,7 @@ static noinline void force_sig_info_fault(int si_signo, int si_code,
59 info.si_code = si_code; 62 info.si_code = si_code;
60 info.si_addr = (void __user *)address; 63 info.si_addr = (void __user *)address;
61 info.si_trapno = fault_num; 64 info.si_trapno = fault_num;
65 trace_unhandled_signal(type, regs, address, si_signo);
62 force_sig_info(si_signo, &info, tsk); 66 force_sig_info(si_signo, &info, tsk);
63} 67}
64 68
@@ -71,11 +75,12 @@ SYSCALL_DEFINE2(cmpxchg_badaddr, unsigned long, address,
71 struct pt_regs *, regs) 75 struct pt_regs *, regs)
72{ 76{
73 if (address >= PAGE_OFFSET) 77 if (address >= PAGE_OFFSET)
74 force_sig_info_fault(SIGSEGV, SEGV_MAPERR, address, 78 force_sig_info_fault("atomic segfault", SIGSEGV, SEGV_MAPERR,
75 INT_DTLB_MISS, current); 79 address, INT_DTLB_MISS, current, regs);
76 else 80 else
77 force_sig_info_fault(SIGBUS, BUS_ADRALN, address, 81 force_sig_info_fault("atomic alignment fault", SIGBUS,
78 INT_UNALIGN_DATA, current); 82 BUS_ADRALN, address,
83 INT_UNALIGN_DATA, current, regs);
79 84
80 /* 85 /*
81 * Adjust pc to point at the actual instruction, which is unusual 86 * Adjust pc to point at the actual instruction, which is unusual
@@ -471,8 +476,8 @@ bad_area_nosemaphore:
471 */ 476 */
472 local_irq_enable(); 477 local_irq_enable();
473 478
474 force_sig_info_fault(SIGSEGV, si_code, address, 479 force_sig_info_fault("segfault", SIGSEGV, si_code, address,
475 fault_num, tsk); 480 fault_num, tsk, regs);
476 return 0; 481 return 0;
477 } 482 }
478 483
@@ -547,7 +552,8 @@ do_sigbus:
547 if (is_kernel_mode) 552 if (is_kernel_mode)
548 goto no_context; 553 goto no_context;
549 554
550 force_sig_info_fault(SIGBUS, BUS_ADRERR, address, fault_num, tsk); 555 force_sig_info_fault("bus error", SIGBUS, BUS_ADRERR, address,
556 fault_num, tsk, regs);
551 return 0; 557 return 0;
552} 558}
553 559
@@ -732,6 +738,7 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
732 panic("Bad fault number %d in do_page_fault", fault_num); 738 panic("Bad fault number %d in do_page_fault", fault_num);
733 } 739 }
734 740
741#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
735 if (EX1_PL(regs->ex1) != USER_PL) { 742 if (EX1_PL(regs->ex1) != USER_PL) {
736 struct async_tlb *async; 743 struct async_tlb *async;
737 switch (fault_num) { 744 switch (fault_num) {
@@ -775,6 +782,7 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
775 return; 782 return;
776 } 783 }
777 } 784 }
785#endif
778 786
779 handle_page_fault(regs, fault_num, is_page_fault, address, write); 787 handle_page_fault(regs, fault_num, is_page_fault, address, write);
780} 788}
@@ -801,8 +809,6 @@ static void handle_async_page_fault(struct pt_regs *regs,
801 async->address, async->is_write); 809 async->address, async->is_write);
802 } 810 }
803} 811}
804#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */
805
806 812
807/* 813/*
808 * This routine effectively re-issues asynchronous page faults 814 * This routine effectively re-issues asynchronous page faults
@@ -824,6 +830,8 @@ void do_async_page_fault(struct pt_regs *regs)
824 handle_async_page_fault(regs, &current->thread.sn_async_tlb); 830 handle_async_page_fault(regs, &current->thread.sn_async_tlb);
825#endif 831#endif
826} 832}
833#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */
834
827 835
828void vmalloc_sync_all(void) 836void vmalloc_sync_all(void)
829{ 837{
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index d6e87fda2fb2..4e10c4023028 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -60,8 +60,6 @@ unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE;
60EXPORT_SYMBOL(VMALLOC_RESERVE); 60EXPORT_SYMBOL(VMALLOC_RESERVE);
61#endif 61#endif
62 62
63DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
64
65/* Create an L2 page table */ 63/* Create an L2 page table */
66static pte_t * __init alloc_pte(void) 64static pte_t * __init alloc_pte(void)
67{ 65{
diff --git a/arch/tile/mm/migrate_64.S b/arch/tile/mm/migrate_64.S
new file mode 100644
index 000000000000..e76fea688beb
--- /dev/null
+++ b/arch/tile/mm/migrate_64.S
@@ -0,0 +1,187 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * This routine is a helper for migrating the home of a set of pages to
15 * a new cpu. See the documentation in homecache.c for more information.
16 */
17
18#include <linux/linkage.h>
19#include <linux/threads.h>
20#include <asm/page.h>
21#include <asm/thread_info.h>
22#include <asm/types.h>
23#include <asm/asm-offsets.h>
24#include <hv/hypervisor.h>
25
26 .text
27
28/*
29 * First, some definitions that apply to all the code in the file.
30 */
31
32/* Locals (caller-save) */
33#define r_tmp r10
34#define r_save_sp r11
35
36/* What we save where in the stack frame; must include all callee-saves. */
37#define FRAME_SP 8
38#define FRAME_R30 16
39#define FRAME_R31 24
40#define FRAME_R32 32
41#define FRAME_R33 40
42#define FRAME_SIZE 48
43
44
45
46
47/*
48 * On entry:
49 *
50 * r0 the new context PA to install (moved to r_context)
51 * r1 PTE to use for context access (moved to r_access)
52 * r2 ASID to use for new context (moved to r_asid)
53 * r3 pointer to cpumask with just this cpu set in it (r_my_cpumask)
54 */
55
56/* Arguments (caller-save) */
57#define r_context_in r0
58#define r_access_in r1
59#define r_asid_in r2
60#define r_my_cpumask r3
61
62/* Locals (callee-save); must not be more than FRAME_xxx above. */
63#define r_save_ics r30
64#define r_context r31
65#define r_access r32
66#define r_asid r33
67
68/*
69 * Caller-save locals and frame constants are the same as
70 * for homecache_migrate_stack_and_flush.
71 */
72
73STD_ENTRY(flush_and_install_context)
74 /*
75 * Create a stack frame; we can't touch it once we flush the
76 * cache until we install the new page table and flush the TLB.
77 */
78 {
79 move r_save_sp, sp
80 st sp, lr
81 addi sp, sp, -FRAME_SIZE
82 }
83 addi r_tmp, sp, FRAME_SP
84 {
85 st r_tmp, r_save_sp
86 addi r_tmp, sp, FRAME_R30
87 }
88 {
89 st r_tmp, r30
90 addi r_tmp, sp, FRAME_R31
91 }
92 {
93 st r_tmp, r31
94 addi r_tmp, sp, FRAME_R32
95 }
96 {
97 st r_tmp, r32
98 addi r_tmp, sp, FRAME_R33
99 }
100 st r_tmp, r33
101
102 /* Move some arguments to callee-save registers. */
103 {
104 move r_context, r_context_in
105 move r_access, r_access_in
106 }
107 move r_asid, r_asid_in
108
109 /* Disable interrupts, since we can't use our stack. */
110 {
111 mfspr r_save_ics, INTERRUPT_CRITICAL_SECTION
112 movei r_tmp, 1
113 }
114 mtspr INTERRUPT_CRITICAL_SECTION, r_tmp
115
116 /* First, flush our L2 cache. */
117 {
118 move r0, zero /* cache_pa */
119 moveli r1, hw2_last(HV_FLUSH_EVICT_L2) /* cache_control */
120 }
121 {
122 shl16insli r1, r1, hw1(HV_FLUSH_EVICT_L2)
123 move r2, r_my_cpumask /* cache_cpumask */
124 }
125 {
126 shl16insli r1, r1, hw0(HV_FLUSH_EVICT_L2)
127 move r3, zero /* tlb_va */
128 }
129 {
130 move r4, zero /* tlb_length */
131 move r5, zero /* tlb_pgsize */
132 }
133 {
134 move r6, zero /* tlb_cpumask */
135 move r7, zero /* asids */
136 }
137 {
138 move r8, zero /* asidcount */
139 jal hv_flush_remote
140 }
141 bnez r0, 1f
142
143 /* Now install the new page table. */
144 {
145 move r0, r_context
146 move r1, r_access
147 }
148 {
149 move r2, r_asid
150 movei r3, HV_CTX_DIRECTIO
151 }
152 jal hv_install_context
153 bnez r0, 1f
154
155 /* Finally, flush the TLB. */
156 {
157 movei r0, 0 /* preserve_global */
158 jal hv_flush_all
159 }
160
1611: /* Reset interrupts back how they were before. */
162 mtspr INTERRUPT_CRITICAL_SECTION, r_save_ics
163
164 /* Restore the callee-saved registers and return. */
165 addli lr, sp, FRAME_SIZE
166 {
167 ld lr, lr
168 addli r_tmp, sp, FRAME_R30
169 }
170 {
171 ld r30, r_tmp
172 addli r_tmp, sp, FRAME_R31
173 }
174 {
175 ld r31, r_tmp
176 addli r_tmp, sp, FRAME_R32
177 }
178 {
179 ld r32, r_tmp
180 addli r_tmp, sp, FRAME_R33
181 }
182 {
183 ld r33, r_tmp
184 addi sp, sp, FRAME_SIZE
185 }
186 jrp lr
187 STD_ENDPROC(flush_and_install_context)
diff --git a/arch/um/Kconfig.debug b/arch/um/Kconfig.debug
index 8fce5e536b0f..68205fd3b08c 100644
--- a/arch/um/Kconfig.debug
+++ b/arch/um/Kconfig.debug
@@ -28,13 +28,13 @@ config GCOV
28 If you're involved in UML kernel development and want to use gcov, 28 If you're involved in UML kernel development and want to use gcov,
29 say Y. If you're unsure, say N. 29 say Y. If you're unsure, say N.
30 30
31config DEBUG_STACK_USAGE 31config EARLY_PRINTK
32 bool "Stack utilization instrumentation" 32 bool "Early printk"
33 default N 33 default y
34 help 34 ---help---
35 Track the maximum kernel stack usage - this will look at each 35 Write kernel log output directly to stdout.
36 kernel stack at process exit and log it if it's the deepest 36
37 stack seen so far. 37 This is useful for kernel debugging when your machine crashes very
38 early before the console code is initialized.
38 39
39 This option will slow down process creation and destruction somewhat.
40endmenu 40endmenu
diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile
index 1d9b6ae967b0..e7582e1d248c 100644
--- a/arch/um/drivers/Makefile
+++ b/arch/um/drivers/Makefile
@@ -9,7 +9,7 @@
9slip-objs := slip_kern.o slip_user.o 9slip-objs := slip_kern.o slip_user.o
10slirp-objs := slirp_kern.o slirp_user.o 10slirp-objs := slirp_kern.o slirp_user.o
11daemon-objs := daemon_kern.o daemon_user.o 11daemon-objs := daemon_kern.o daemon_user.o
12mcast-objs := mcast_kern.o mcast_user.o 12umcast-objs := umcast_kern.o umcast_user.o
13net-objs := net_kern.o net_user.o 13net-objs := net_kern.o net_user.o
14mconsole-objs := mconsole_kern.o mconsole_user.o 14mconsole-objs := mconsole_kern.o mconsole_user.o
15hostaudio-objs := hostaudio_kern.o 15hostaudio-objs := hostaudio_kern.o
@@ -44,7 +44,7 @@ obj-$(CONFIG_UML_NET_SLIP) += slip.o slip_common.o
44obj-$(CONFIG_UML_NET_SLIRP) += slirp.o slip_common.o 44obj-$(CONFIG_UML_NET_SLIRP) += slirp.o slip_common.o
45obj-$(CONFIG_UML_NET_DAEMON) += daemon.o 45obj-$(CONFIG_UML_NET_DAEMON) += daemon.o
46obj-$(CONFIG_UML_NET_VDE) += vde.o 46obj-$(CONFIG_UML_NET_VDE) += vde.o
47obj-$(CONFIG_UML_NET_MCAST) += mcast.o 47obj-$(CONFIG_UML_NET_MCAST) += umcast.o
48obj-$(CONFIG_UML_NET_PCAP) += pcap.o 48obj-$(CONFIG_UML_NET_PCAP) += pcap.o
49obj-$(CONFIG_UML_NET) += net.o 49obj-$(CONFIG_UML_NET) += net.o
50obj-$(CONFIG_MCONSOLE) += mconsole.o 50obj-$(CONFIG_MCONSOLE) += mconsole.o
diff --git a/arch/um/drivers/mcast.h b/arch/um/drivers/mcast.h
deleted file mode 100644
index 6fa282e896be..000000000000
--- a/arch/um/drivers/mcast.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/*
2 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __DRIVERS_MCAST_H
7#define __DRIVERS_MCAST_H
8
9#include "net_user.h"
10
11struct mcast_data {
12 char *addr;
13 unsigned short port;
14 void *mcast_addr;
15 int ttl;
16 void *dev;
17};
18
19extern const struct net_user_info mcast_user_info;
20
21extern int mcast_user_write(int fd, void *buf, int len,
22 struct mcast_data *pri);
23
24#endif
diff --git a/arch/um/drivers/mcast_kern.c b/arch/um/drivers/mcast_kern.c
deleted file mode 100644
index ffc6416d5ed7..000000000000
--- a/arch/um/drivers/mcast_kern.c
+++ /dev/null
@@ -1,120 +0,0 @@
1/*
2 * user-mode-linux networking multicast transport
3 * Copyright (C) 2001 by Harald Welte <laforge@gnumonks.org>
4 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
5 *
6 * based on the existing uml-networking code, which is
7 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
8 * James Leu (jleu@mindspring.net).
9 * Copyright (C) 2001 by various other people who didn't put their name here.
10 *
11 * Licensed under the GPL.
12 */
13
14#include "linux/init.h"
15#include <linux/netdevice.h>
16#include "mcast.h"
17#include "net_kern.h"
18
19struct mcast_init {
20 char *addr;
21 int port;
22 int ttl;
23};
24
25static void mcast_init(struct net_device *dev, void *data)
26{
27 struct uml_net_private *pri;
28 struct mcast_data *dpri;
29 struct mcast_init *init = data;
30
31 pri = netdev_priv(dev);
32 dpri = (struct mcast_data *) pri->user;
33 dpri->addr = init->addr;
34 dpri->port = init->port;
35 dpri->ttl = init->ttl;
36 dpri->dev = dev;
37
38 printk("mcast backend multicast address: %s:%u, TTL:%u\n",
39 dpri->addr, dpri->port, dpri->ttl);
40}
41
42static int mcast_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
43{
44 return net_recvfrom(fd, skb_mac_header(skb),
45 skb->dev->mtu + ETH_HEADER_OTHER);
46}
47
48static int mcast_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
49{
50 return mcast_user_write(fd, skb->data, skb->len,
51 (struct mcast_data *) &lp->user);
52}
53
54static const struct net_kern_info mcast_kern_info = {
55 .init = mcast_init,
56 .protocol = eth_protocol,
57 .read = mcast_read,
58 .write = mcast_write,
59};
60
61static int mcast_setup(char *str, char **mac_out, void *data)
62{
63 struct mcast_init *init = data;
64 char *port_str = NULL, *ttl_str = NULL, *remain;
65 char *last;
66
67 *init = ((struct mcast_init)
68 { .addr = "239.192.168.1",
69 .port = 1102,
70 .ttl = 1 });
71
72 remain = split_if_spec(str, mac_out, &init->addr, &port_str, &ttl_str,
73 NULL);
74 if (remain != NULL) {
75 printk(KERN_ERR "mcast_setup - Extra garbage on "
76 "specification : '%s'\n", remain);
77 return 0;
78 }
79
80 if (port_str != NULL) {
81 init->port = simple_strtoul(port_str, &last, 10);
82 if ((*last != '\0') || (last == port_str)) {
83 printk(KERN_ERR "mcast_setup - Bad port : '%s'\n",
84 port_str);
85 return 0;
86 }
87 }
88
89 if (ttl_str != NULL) {
90 init->ttl = simple_strtoul(ttl_str, &last, 10);
91 if ((*last != '\0') || (last == ttl_str)) {
92 printk(KERN_ERR "mcast_setup - Bad ttl : '%s'\n",
93 ttl_str);
94 return 0;
95 }
96 }
97
98 printk(KERN_INFO "Configured mcast device: %s:%u-%u\n", init->addr,
99 init->port, init->ttl);
100
101 return 1;
102}
103
104static struct transport mcast_transport = {
105 .list = LIST_HEAD_INIT(mcast_transport.list),
106 .name = "mcast",
107 .setup = mcast_setup,
108 .user = &mcast_user_info,
109 .kern = &mcast_kern_info,
110 .private_size = sizeof(struct mcast_data),
111 .setup_size = sizeof(struct mcast_init),
112};
113
114static int register_mcast(void)
115{
116 register_transport(&mcast_transport);
117 return 0;
118}
119
120late_initcall(register_mcast);
diff --git a/arch/um/drivers/mcast_user.c b/arch/um/drivers/mcast_user.c
deleted file mode 100644
index ee19e91568a2..000000000000
--- a/arch/um/drivers/mcast_user.c
+++ /dev/null
@@ -1,165 +0,0 @@
1/*
2 * user-mode-linux networking multicast transport
3 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
4 * Copyright (C) 2001 by Harald Welte <laforge@gnumonks.org>
5 *
6 * based on the existing uml-networking code, which is
7 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
8 * James Leu (jleu@mindspring.net).
9 * Copyright (C) 2001 by various other people who didn't put their name here.
10 *
11 * Licensed under the GPL.
12 *
13 */
14
15#include <unistd.h>
16#include <errno.h>
17#include <netinet/in.h>
18#include "kern_constants.h"
19#include "mcast.h"
20#include "net_user.h"
21#include "um_malloc.h"
22#include "user.h"
23
24static struct sockaddr_in *new_addr(char *addr, unsigned short port)
25{
26 struct sockaddr_in *sin;
27
28 sin = uml_kmalloc(sizeof(struct sockaddr_in), UM_GFP_KERNEL);
29 if (sin == NULL) {
30 printk(UM_KERN_ERR "new_addr: allocation of sockaddr_in "
31 "failed\n");
32 return NULL;
33 }
34 sin->sin_family = AF_INET;
35 sin->sin_addr.s_addr = in_aton(addr);
36 sin->sin_port = htons(port);
37 return sin;
38}
39
40static int mcast_user_init(void *data, void *dev)
41{
42 struct mcast_data *pri = data;
43
44 pri->mcast_addr = new_addr(pri->addr, pri->port);
45 pri->dev = dev;
46 return 0;
47}
48
49static void mcast_remove(void *data)
50{
51 struct mcast_data *pri = data;
52
53 kfree(pri->mcast_addr);
54 pri->mcast_addr = NULL;
55}
56
57static int mcast_open(void *data)
58{
59 struct mcast_data *pri = data;
60 struct sockaddr_in *sin = pri->mcast_addr;
61 struct ip_mreq mreq;
62 int fd, yes = 1, err = -EINVAL;
63
64
65 if ((sin->sin_addr.s_addr == 0) || (sin->sin_port == 0))
66 goto out;
67
68 fd = socket(AF_INET, SOCK_DGRAM, 0);
69
70 if (fd < 0) {
71 err = -errno;
72 printk(UM_KERN_ERR "mcast_open : data socket failed, "
73 "errno = %d\n", errno);
74 goto out;
75 }
76
77 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)) < 0) {
78 err = -errno;
79 printk(UM_KERN_ERR "mcast_open: SO_REUSEADDR failed, "
80 "errno = %d\n", errno);
81 goto out_close;
82 }
83
84 /* set ttl according to config */
85 if (setsockopt(fd, SOL_IP, IP_MULTICAST_TTL, &pri->ttl,
86 sizeof(pri->ttl)) < 0) {
87 err = -errno;
88 printk(UM_KERN_ERR "mcast_open: IP_MULTICAST_TTL failed, "
89 "error = %d\n", errno);
90 goto out_close;
91 }
92
93 /* set LOOP, so data does get fed back to local sockets */
94 if (setsockopt(fd, SOL_IP, IP_MULTICAST_LOOP, &yes, sizeof(yes)) < 0) {
95 err = -errno;
96 printk(UM_KERN_ERR "mcast_open: IP_MULTICAST_LOOP failed, "
97 "error = %d\n", errno);
98 goto out_close;
99 }
100
101 /* bind socket to mcast address */
102 if (bind(fd, (struct sockaddr *) sin, sizeof(*sin)) < 0) {
103 err = -errno;
104 printk(UM_KERN_ERR "mcast_open : data bind failed, "
105 "errno = %d\n", errno);
106 goto out_close;
107 }
108
109 /* subscribe to the multicast group */
110 mreq.imr_multiaddr.s_addr = sin->sin_addr.s_addr;
111 mreq.imr_interface.s_addr = 0;
112 if (setsockopt(fd, SOL_IP, IP_ADD_MEMBERSHIP,
113 &mreq, sizeof(mreq)) < 0) {
114 err = -errno;
115 printk(UM_KERN_ERR "mcast_open: IP_ADD_MEMBERSHIP failed, "
116 "error = %d\n", errno);
117 printk(UM_KERN_ERR "There appears not to be a multicast-"
118 "capable network interface on the host.\n");
119 printk(UM_KERN_ERR "eth0 should be configured in order to use "
120 "the multicast transport.\n");
121 goto out_close;
122 }
123
124 return fd;
125
126 out_close:
127 close(fd);
128 out:
129 return err;
130}
131
132static void mcast_close(int fd, void *data)
133{
134 struct ip_mreq mreq;
135 struct mcast_data *pri = data;
136 struct sockaddr_in *sin = pri->mcast_addr;
137
138 mreq.imr_multiaddr.s_addr = sin->sin_addr.s_addr;
139 mreq.imr_interface.s_addr = 0;
140 if (setsockopt(fd, SOL_IP, IP_DROP_MEMBERSHIP,
141 &mreq, sizeof(mreq)) < 0) {
142 printk(UM_KERN_ERR "mcast_open: IP_DROP_MEMBERSHIP failed, "
143 "error = %d\n", errno);
144 }
145
146 close(fd);
147}
148
149int mcast_user_write(int fd, void *buf, int len, struct mcast_data *pri)
150{
151 struct sockaddr_in *data_addr = pri->mcast_addr;
152
153 return net_sendto(fd, buf, len, data_addr, sizeof(*data_addr));
154}
155
156const struct net_user_info mcast_user_info = {
157 .init = mcast_user_init,
158 .open = mcast_open,
159 .close = mcast_close,
160 .remove = mcast_remove,
161 .add_address = NULL,
162 .delete_address = NULL,
163 .mtu = ETH_MAX_PACKET,
164 .max_packet = ETH_MAX_PACKET + ETH_HEADER_OTHER,
165};
diff --git a/arch/um/drivers/umcast.h b/arch/um/drivers/umcast.h
new file mode 100644
index 000000000000..6f8c0fe890fb
--- /dev/null
+++ b/arch/um/drivers/umcast.h
@@ -0,0 +1,27 @@
1/*
2 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Licensed under the GPL
4 */
5
6#ifndef __DRIVERS_UMCAST_H
7#define __DRIVERS_UMCAST_H
8
9#include "net_user.h"
10
11struct umcast_data {
12 char *addr;
13 unsigned short lport;
14 unsigned short rport;
15 void *listen_addr;
16 void *remote_addr;
17 int ttl;
18 int unicast;
19 void *dev;
20};
21
22extern const struct net_user_info umcast_user_info;
23
24extern int umcast_user_write(int fd, void *buf, int len,
25 struct umcast_data *pri);
26
27#endif
diff --git a/arch/um/drivers/umcast_kern.c b/arch/um/drivers/umcast_kern.c
new file mode 100644
index 000000000000..42dab11d2ecf
--- /dev/null
+++ b/arch/um/drivers/umcast_kern.c
@@ -0,0 +1,188 @@
1/*
2 * user-mode-linux networking multicast transport
3 * Copyright (C) 2001 by Harald Welte <laforge@gnumonks.org>
4 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
5 *
6 * based on the existing uml-networking code, which is
7 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
8 * James Leu (jleu@mindspring.net).
9 * Copyright (C) 2001 by various other people who didn't put their name here.
10 *
11 * Licensed under the GPL.
12 */
13
14#include "linux/init.h"
15#include <linux/netdevice.h>
16#include "umcast.h"
17#include "net_kern.h"
18
19struct umcast_init {
20 char *addr;
21 int lport;
22 int rport;
23 int ttl;
24 bool unicast;
25};
26
27static void umcast_init(struct net_device *dev, void *data)
28{
29 struct uml_net_private *pri;
30 struct umcast_data *dpri;
31 struct umcast_init *init = data;
32
33 pri = netdev_priv(dev);
34 dpri = (struct umcast_data *) pri->user;
35 dpri->addr = init->addr;
36 dpri->lport = init->lport;
37 dpri->rport = init->rport;
38 dpri->unicast = init->unicast;
39 dpri->ttl = init->ttl;
40 dpri->dev = dev;
41
42 if (dpri->unicast) {
43 printk(KERN_INFO "ucast backend address: %s:%u listen port: "
44 "%u\n", dpri->addr, dpri->rport, dpri->lport);
45 } else {
46 printk(KERN_INFO "mcast backend multicast address: %s:%u, "
47 "TTL:%u\n", dpri->addr, dpri->lport, dpri->ttl);
48 }
49}
50
51static int umcast_read(int fd, struct sk_buff *skb, struct uml_net_private *lp)
52{
53 return net_recvfrom(fd, skb_mac_header(skb),
54 skb->dev->mtu + ETH_HEADER_OTHER);
55}
56
57static int umcast_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
58{
59 return umcast_user_write(fd, skb->data, skb->len,
60 (struct umcast_data *) &lp->user);
61}
62
63static const struct net_kern_info umcast_kern_info = {
64 .init = umcast_init,
65 .protocol = eth_protocol,
66 .read = umcast_read,
67 .write = umcast_write,
68};
69
70static int mcast_setup(char *str, char **mac_out, void *data)
71{
72 struct umcast_init *init = data;
73 char *port_str = NULL, *ttl_str = NULL, *remain;
74 char *last;
75
76 *init = ((struct umcast_init)
77 { .addr = "239.192.168.1",
78 .lport = 1102,
79 .ttl = 1 });
80
81 remain = split_if_spec(str, mac_out, &init->addr, &port_str, &ttl_str,
82 NULL);
83 if (remain != NULL) {
84 printk(KERN_ERR "mcast_setup - Extra garbage on "
85 "specification : '%s'\n", remain);
86 return 0;
87 }
88
89 if (port_str != NULL) {
90 init->lport = simple_strtoul(port_str, &last, 10);
91 if ((*last != '\0') || (last == port_str)) {
92 printk(KERN_ERR "mcast_setup - Bad port : '%s'\n",
93 port_str);
94 return 0;
95 }
96 }
97
98 if (ttl_str != NULL) {
99 init->ttl = simple_strtoul(ttl_str, &last, 10);
100 if ((*last != '\0') || (last == ttl_str)) {
101 printk(KERN_ERR "mcast_setup - Bad ttl : '%s'\n",
102 ttl_str);
103 return 0;
104 }
105 }
106
107 init->unicast = false;
108 init->rport = init->lport;
109
110 printk(KERN_INFO "Configured mcast device: %s:%u-%u\n", init->addr,
111 init->lport, init->ttl);
112
113 return 1;
114}
115
116static int ucast_setup(char *str, char **mac_out, void *data)
117{
118 struct umcast_init *init = data;
119 char *lport_str = NULL, *rport_str = NULL, *remain;
120 char *last;
121
122 *init = ((struct umcast_init)
123 { .addr = "",
124 .lport = 1102,
125 .rport = 1102 });
126
127 remain = split_if_spec(str, mac_out, &init->addr,
128 &lport_str, &rport_str, NULL);
129 if (remain != NULL) {
130 printk(KERN_ERR "ucast_setup - Extra garbage on "
131 "specification : '%s'\n", remain);
132 return 0;
133 }
134
135 if (lport_str != NULL) {
136 init->lport = simple_strtoul(lport_str, &last, 10);
137 if ((*last != '\0') || (last == lport_str)) {
138 printk(KERN_ERR "ucast_setup - Bad listen port : "
139 "'%s'\n", lport_str);
140 return 0;
141 }
142 }
143
144 if (rport_str != NULL) {
145 init->rport = simple_strtoul(rport_str, &last, 10);
146 if ((*last != '\0') || (last == rport_str)) {
147 printk(KERN_ERR "ucast_setup - Bad remote port : "
148 "'%s'\n", rport_str);
149 return 0;
150 }
151 }
152
153 init->unicast = true;
154
155 printk(KERN_INFO "Configured ucast device: :%u -> %s:%u\n",
156 init->lport, init->addr, init->rport);
157
158 return 1;
159}
160
161static struct transport mcast_transport = {
162 .list = LIST_HEAD_INIT(mcast_transport.list),
163 .name = "mcast",
164 .setup = mcast_setup,
165 .user = &umcast_user_info,
166 .kern = &umcast_kern_info,
167 .private_size = sizeof(struct umcast_data),
168 .setup_size = sizeof(struct umcast_init),
169};
170
171static struct transport ucast_transport = {
172 .list = LIST_HEAD_INIT(ucast_transport.list),
173 .name = "ucast",
174 .setup = ucast_setup,
175 .user = &umcast_user_info,
176 .kern = &umcast_kern_info,
177 .private_size = sizeof(struct umcast_data),
178 .setup_size = sizeof(struct umcast_init),
179};
180
181static int register_umcast(void)
182{
183 register_transport(&mcast_transport);
184 register_transport(&ucast_transport);
185 return 0;
186}
187
188late_initcall(register_umcast);
diff --git a/arch/um/drivers/umcast_user.c b/arch/um/drivers/umcast_user.c
new file mode 100644
index 000000000000..59c56fd6f52a
--- /dev/null
+++ b/arch/um/drivers/umcast_user.c
@@ -0,0 +1,186 @@
1/*
2 * user-mode-linux networking multicast transport
3 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
4 * Copyright (C) 2001 by Harald Welte <laforge@gnumonks.org>
5 *
6 * based on the existing uml-networking code, which is
7 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
8 * James Leu (jleu@mindspring.net).
9 * Copyright (C) 2001 by various other people who didn't put their name here.
10 *
11 * Licensed under the GPL.
12 *
13 */
14
15#include <unistd.h>
16#include <errno.h>
17#include <netinet/in.h>
18#include "kern_constants.h"
19#include "umcast.h"
20#include "net_user.h"
21#include "um_malloc.h"
22#include "user.h"
23
24static struct sockaddr_in *new_addr(char *addr, unsigned short port)
25{
26 struct sockaddr_in *sin;
27
28 sin = uml_kmalloc(sizeof(struct sockaddr_in), UM_GFP_KERNEL);
29 if (sin == NULL) {
30 printk(UM_KERN_ERR "new_addr: allocation of sockaddr_in "
31 "failed\n");
32 return NULL;
33 }
34 sin->sin_family = AF_INET;
35 if (addr)
36 sin->sin_addr.s_addr = in_aton(addr);
37 else
38 sin->sin_addr.s_addr = INADDR_ANY;
39 sin->sin_port = htons(port);
40 return sin;
41}
42
43static int umcast_user_init(void *data, void *dev)
44{
45 struct umcast_data *pri = data;
46
47 pri->remote_addr = new_addr(pri->addr, pri->rport);
48 if (pri->unicast)
49 pri->listen_addr = new_addr(NULL, pri->lport);
50 else
51 pri->listen_addr = pri->remote_addr;
52 pri->dev = dev;
53 return 0;
54}
55
56static void umcast_remove(void *data)
57{
58 struct umcast_data *pri = data;
59
60 kfree(pri->listen_addr);
61 if (pri->unicast)
62 kfree(pri->remote_addr);
63 pri->listen_addr = pri->remote_addr = NULL;
64}
65
66static int umcast_open(void *data)
67{
68 struct umcast_data *pri = data;
69 struct sockaddr_in *lsin = pri->listen_addr;
70 struct sockaddr_in *rsin = pri->remote_addr;
71 struct ip_mreq mreq;
72 int fd, yes = 1, err = -EINVAL;
73
74
75 if ((!pri->unicast && lsin->sin_addr.s_addr == 0) ||
76 (rsin->sin_addr.s_addr == 0) ||
77 (lsin->sin_port == 0) || (rsin->sin_port == 0))
78 goto out;
79
80 fd = socket(AF_INET, SOCK_DGRAM, 0);
81
82 if (fd < 0) {
83 err = -errno;
84 printk(UM_KERN_ERR "umcast_open : data socket failed, "
85 "errno = %d\n", errno);
86 goto out;
87 }
88
89 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)) < 0) {
90 err = -errno;
91 printk(UM_KERN_ERR "umcast_open: SO_REUSEADDR failed, "
92 "errno = %d\n", errno);
93 goto out_close;
94 }
95
96 if (!pri->unicast) {
97 /* set ttl according to config */
98 if (setsockopt(fd, SOL_IP, IP_MULTICAST_TTL, &pri->ttl,
99 sizeof(pri->ttl)) < 0) {
100 err = -errno;
101 printk(UM_KERN_ERR "umcast_open: IP_MULTICAST_TTL "
102 "failed, error = %d\n", errno);
103 goto out_close;
104 }
105
106 /* set LOOP, so data does get fed back to local sockets */
107 if (setsockopt(fd, SOL_IP, IP_MULTICAST_LOOP,
108 &yes, sizeof(yes)) < 0) {
109 err = -errno;
110 printk(UM_KERN_ERR "umcast_open: IP_MULTICAST_LOOP "
111 "failed, error = %d\n", errno);
112 goto out_close;
113 }
114 }
115
116 /* bind socket to the address */
117 if (bind(fd, (struct sockaddr *) lsin, sizeof(*lsin)) < 0) {
118 err = -errno;
119 printk(UM_KERN_ERR "umcast_open : data bind failed, "
120 "errno = %d\n", errno);
121 goto out_close;
122 }
123
124 if (!pri->unicast) {
125 /* subscribe to the multicast group */
126 mreq.imr_multiaddr.s_addr = lsin->sin_addr.s_addr;
127 mreq.imr_interface.s_addr = 0;
128 if (setsockopt(fd, SOL_IP, IP_ADD_MEMBERSHIP,
129 &mreq, sizeof(mreq)) < 0) {
130 err = -errno;
131 printk(UM_KERN_ERR "umcast_open: IP_ADD_MEMBERSHIP "
132 "failed, error = %d\n", errno);
133 printk(UM_KERN_ERR "There appears not to be a "
134 "multicast-capable network interface on the "
135 "host.\n");
136 printk(UM_KERN_ERR "eth0 should be configured in order "
137 "to use the multicast transport.\n");
138 goto out_close;
139 }
140 }
141
142 return fd;
143
144 out_close:
145 close(fd);
146 out:
147 return err;
148}
149
150static void umcast_close(int fd, void *data)
151{
152 struct umcast_data *pri = data;
153
154 if (!pri->unicast) {
155 struct ip_mreq mreq;
156 struct sockaddr_in *lsin = pri->listen_addr;
157
158 mreq.imr_multiaddr.s_addr = lsin->sin_addr.s_addr;
159 mreq.imr_interface.s_addr = 0;
160 if (setsockopt(fd, SOL_IP, IP_DROP_MEMBERSHIP,
161 &mreq, sizeof(mreq)) < 0) {
162 printk(UM_KERN_ERR "umcast_close: IP_DROP_MEMBERSHIP "
163 "failed, error = %d\n", errno);
164 }
165 }
166
167 close(fd);
168}
169
170int umcast_user_write(int fd, void *buf, int len, struct umcast_data *pri)
171{
172 struct sockaddr_in *data_addr = pri->remote_addr;
173
174 return net_sendto(fd, buf, len, data_addr, sizeof(*data_addr));
175}
176
177const struct net_user_info umcast_user_info = {
178 .init = umcast_user_init,
179 .open = umcast_open,
180 .close = umcast_close,
181 .remove = umcast_remove,
182 .add_address = NULL,
183 .delete_address = NULL,
184 .mtu = ETH_MAX_PACKET,
185 .max_packet = ETH_MAX_PACKET + ETH_HEADER_OTHER,
186};
diff --git a/arch/um/drivers/xterm.c b/arch/um/drivers/xterm.c
index da2caa5a21ef..8ac7146c237f 100644
--- a/arch/um/drivers/xterm.c
+++ b/arch/um/drivers/xterm.c
@@ -90,7 +90,7 @@ static int xterm_open(int input, int output, int primary, void *d,
90 int pid, fd, new, err; 90 int pid, fd, new, err;
91 char title[256], file[] = "/tmp/xterm-pipeXXXXXX"; 91 char title[256], file[] = "/tmp/xterm-pipeXXXXXX";
92 char *argv[] = { terminal_emulator, title_switch, title, exec_switch, 92 char *argv[] = { terminal_emulator, title_switch, title, exec_switch,
93 "/usr/lib/uml/port-helper", "-uml-socket", 93 OS_LIB_PATH "/uml/port-helper", "-uml-socket",
94 file, NULL }; 94 file, NULL };
95 95
96 if (access(argv[4], X_OK) < 0) 96 if (access(argv[4], X_OK) < 0)
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index d1d1b0d8a0cd..98d01bc4fa92 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -14,6 +14,8 @@ struct task_struct;
14#include "registers.h" 14#include "registers.h"
15#include "sysdep/archsetjmp.h" 15#include "sysdep/archsetjmp.h"
16 16
17#include <linux/prefetch.h>
18
17struct mm_struct; 19struct mm_struct;
18 20
19struct thread_struct { 21struct thread_struct {
diff --git a/arch/um/include/asm/smp.h b/arch/um/include/asm/smp.h
index f27a96313174..4a4b09d4f366 100644
--- a/arch/um/include/asm/smp.h
+++ b/arch/um/include/asm/smp.h
@@ -11,7 +11,6 @@
11 11
12#define cpu_logical_map(n) (n) 12#define cpu_logical_map(n) (n)
13#define cpu_number_map(n) (n) 13#define cpu_number_map(n) (n)
14#define PROC_CHANGE_PENALTY 15 /* Pick a number, any number */
15extern int hard_smp_processor_id(void); 14extern int hard_smp_processor_id(void);
16#define NO_PROC_ID -1 15#define NO_PROC_ID -1
17 16
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 660caedac9eb..4febacd1a8a1 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -22,9 +22,6 @@ struct mmu_gather {
22 unsigned int fullmm; /* non-zero means full mm flush */ 22 unsigned int fullmm; /* non-zero means full mm flush */
23}; 23};
24 24
25/* Users of the generic TLB shootdown code must declare this storage space. */
26DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
27
28static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, 25static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
29 unsigned long address) 26 unsigned long address)
30{ 27{
@@ -47,27 +44,20 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
47 } 44 }
48} 45}
49 46
50/* tlb_gather_mmu 47static inline void
51 * Return a pointer to an initialized struct mmu_gather. 48tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
52 */
53static inline struct mmu_gather *
54tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
55{ 49{
56 struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
57
58 tlb->mm = mm; 50 tlb->mm = mm;
59 tlb->fullmm = full_mm_flush; 51 tlb->fullmm = full_mm_flush;
60 52
61 init_tlb_gather(tlb); 53 init_tlb_gather(tlb);
62
63 return tlb;
64} 54}
65 55
66extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, 56extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
67 unsigned long end); 57 unsigned long end);
68 58
69static inline void 59static inline void
70tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) 60tlb_flush_mmu(struct mmu_gather *tlb)
71{ 61{
72 if (!tlb->need_flush) 62 if (!tlb->need_flush)
73 return; 63 return;
@@ -83,12 +73,10 @@ tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
83static inline void 73static inline void
84tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) 74tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
85{ 75{
86 tlb_flush_mmu(tlb, start, end); 76 tlb_flush_mmu(tlb);
87 77
88 /* keep the page table cache within bounds */ 78 /* keep the page table cache within bounds */
89 check_pgt_cache(); 79 check_pgt_cache();
90
91 put_cpu_var(mmu_gathers);
92} 80}
93 81
94/* tlb_remove_page 82/* tlb_remove_page
@@ -96,11 +84,16 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
96 * while handling the additional races in SMP caused by other CPUs 84 * while handling the additional races in SMP caused by other CPUs
97 * caching valid mappings in their TLBs. 85 * caching valid mappings in their TLBs.
98 */ 86 */
99static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) 87static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
100{ 88{
101 tlb->need_flush = 1; 89 tlb->need_flush = 1;
102 free_page_and_swap_cache(page); 90 free_page_and_swap_cache(page);
103 return; 91 return 1; /* avoid calling tlb_flush_mmu */
92}
93
94static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
95{
96 __tlb_remove_page(tlb, page);
104} 97}
105 98
106/** 99/**
diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
index c4617baaa4f2..83c7c2ecd614 100644
--- a/arch/um/include/shared/os.h
+++ b/arch/um/include/shared/os.h
@@ -29,6 +29,12 @@
29#define OS_ACC_R_OK 4 /* Test for read permission. */ 29#define OS_ACC_R_OK 4 /* Test for read permission. */
30#define OS_ACC_RW_OK (OS_ACC_W_OK | OS_ACC_R_OK) /* Test for RW permission */ 30#define OS_ACC_RW_OK (OS_ACC_W_OK | OS_ACC_R_OK) /* Test for RW permission */
31 31
32#ifdef CONFIG_64BIT
33#define OS_LIB_PATH "/usr/lib64/"
34#else
35#define OS_LIB_PATH "/usr/lib/"
36#endif
37
32/* 38/*
33 * types taken from stat_file() in hostfs_user.c 39 * types taken from stat_file() in hostfs_user.c
34 * (if they are wrong here, they are wrong there...). 40 * (if they are wrong here, they are wrong there...).
@@ -238,6 +244,7 @@ extern int raw(int fd);
238extern void setup_machinename(char *machine_out); 244extern void setup_machinename(char *machine_out);
239extern void setup_hostinfo(char *buf, int len); 245extern void setup_hostinfo(char *buf, int len);
240extern void os_dump_core(void) __attribute__ ((noreturn)); 246extern void os_dump_core(void) __attribute__ ((noreturn));
247extern void um_early_printk(const char *s, unsigned int n);
241 248
242/* time.c */ 249/* time.c */
243extern void idle_sleep(unsigned long long nsecs); 250extern void idle_sleep(unsigned long long nsecs);
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 1119233597a1..c4491c15afb2 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -17,6 +17,7 @@ obj-y = config.o exec.o exitcode.o init_task.o irq.o ksyms.o mem.o \
17obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o 17obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
18obj-$(CONFIG_GPROF) += gprof_syms.o 18obj-$(CONFIG_GPROF) += gprof_syms.o
19obj-$(CONFIG_GCOV) += gmon_syms.o 19obj-$(CONFIG_GCOV) += gmon_syms.o
20obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
20 21
21USER_OBJS := config.o 22USER_OBJS := config.o
22 23
diff --git a/arch/um/kernel/early_printk.c b/arch/um/kernel/early_printk.c
new file mode 100644
index 000000000000..ec649bf72f68
--- /dev/null
+++ b/arch/um/kernel/early_printk.c
@@ -0,0 +1,33 @@
1/*
2 * Copyright (C) 2011 Richard Weinberger <richrd@nod.at>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/kernel.h>
10#include <linux/console.h>
11#include <linux/init.h>
12#include "os.h"
13
14static void early_console_write(struct console *con, const char *s, unsigned int n)
15{
16 um_early_printk(s, n);
17}
18
19static struct console early_console = {
20 .name = "earlycon",
21 .write = early_console_write,
22 .flags = CON_BOOT,
23 .index = -1,
24};
25
26static int __init setup_early_printk(char *buf)
27{
28 register_console(&early_console);
29
30 return 0;
31}
32
33early_param("earlyprintk", setup_early_printk);
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index eefb107d2d73..155206a66908 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -7,9 +7,6 @@
7#include "asm/pgalloc.h" 7#include "asm/pgalloc.h"
8#include "asm/tlb.h" 8#include "asm/tlb.h"
9 9
10/* For some reason, mmu_gathers are referenced when CONFIG_SMP is off. */
11DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
12
13#ifdef CONFIG_SMP 10#ifdef CONFIG_SMP
14 11
15#include "linux/sched.h" 12#include "linux/sched.h"
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 637c6505dc00..8c7b8823d1f0 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -113,6 +113,27 @@ out_of_memory:
113 return 0; 113 return 0;
114} 114}
115 115
116static void show_segv_info(struct uml_pt_regs *regs)
117{
118 struct task_struct *tsk = current;
119 struct faultinfo *fi = UPT_FAULTINFO(regs);
120
121 if (!unhandled_signal(tsk, SIGSEGV))
122 return;
123
124 if (!printk_ratelimit())
125 return;
126
127 printk("%s%s[%d]: segfault at %lx ip %p sp %p error %x",
128 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
129 tsk->comm, task_pid_nr(tsk), FAULT_ADDRESS(*fi),
130 (void *)UPT_IP(regs), (void *)UPT_SP(regs),
131 fi->error_code);
132
133 print_vma_addr(KERN_CONT " in ", UPT_IP(regs));
134 printk(KERN_CONT "\n");
135}
136
116static void bad_segv(struct faultinfo fi, unsigned long ip) 137static void bad_segv(struct faultinfo fi, unsigned long ip)
117{ 138{
118 struct siginfo si; 139 struct siginfo si;
@@ -141,6 +162,7 @@ void segv_handler(int sig, struct uml_pt_regs *regs)
141 struct faultinfo * fi = UPT_FAULTINFO(regs); 162 struct faultinfo * fi = UPT_FAULTINFO(regs);
142 163
143 if (UPT_IS_USER(regs) && !SEGV_IS_FIXABLE(fi)) { 164 if (UPT_IS_USER(regs) && !SEGV_IS_FIXABLE(fi)) {
165 show_segv_info(regs);
144 bad_segv(*fi, UPT_IP(regs)); 166 bad_segv(*fi, UPT_IP(regs));
145 return; 167 return;
146 } 168 }
@@ -202,6 +224,8 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
202 address, ip); 224 address, ip);
203 } 225 }
204 226
227 show_segv_info(regs);
228
205 if (err == -EACCES) { 229 if (err == -EACCES) {
206 si.si_signo = SIGBUS; 230 si.si_signo = SIGBUS;
207 si.si_errno = 0; 231 si.si_errno = 0;
diff --git a/arch/um/os-Linux/main.c b/arch/um/os-Linux/main.c
index eee69b9f52c9..fb2a97a75fb1 100644
--- a/arch/um/os-Linux/main.c
+++ b/arch/um/os-Linux/main.c
@@ -78,7 +78,7 @@ static void install_fatal_handler(int sig)
78 } 78 }
79} 79}
80 80
81#define UML_LIB_PATH ":/usr/lib/uml" 81#define UML_LIB_PATH ":" OS_LIB_PATH "/uml"
82 82
83static void setup_env_path(void) 83static void setup_env_path(void)
84{ 84{
@@ -142,7 +142,6 @@ int __init main(int argc, char **argv, char **envp)
142 */ 142 */
143 install_fatal_handler(SIGINT); 143 install_fatal_handler(SIGINT);
144 install_fatal_handler(SIGTERM); 144 install_fatal_handler(SIGTERM);
145 install_fatal_handler(SIGHUP);
146 145
147 scan_elf_aux(envp); 146 scan_elf_aux(envp);
148 147
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
index e0477c3ee894..0c45dc8efb05 100644
--- a/arch/um/os-Linux/process.c
+++ b/arch/um/os-Linux/process.c
@@ -253,6 +253,7 @@ void init_new_thread_signals(void)
253 SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGWINCH, SIGALRM, 253 SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGWINCH, SIGALRM,
254 SIGVTALRM, -1); 254 SIGVTALRM, -1);
255 signal(SIGWINCH, SIG_IGN); 255 signal(SIGWINCH, SIG_IGN);
256 signal(SIGTERM, SIG_DFL);
256} 257}
257 258
258int run_kernel_thread(int (*fn)(void *), void *arg, jmp_buf **jmp_ptr) 259int run_kernel_thread(int (*fn)(void *), void *arg, jmp_buf **jmp_ptr)
diff --git a/arch/um/os-Linux/util.c b/arch/um/os-Linux/util.c
index 42827cafa6af..5803b1887672 100644
--- a/arch/um/os-Linux/util.c
+++ b/arch/um/os-Linux/util.c
@@ -139,3 +139,8 @@ void os_dump_core(void)
139 139
140 uml_abort(); 140 uml_abort();
141} 141}
142
143void um_early_printk(const char *s, unsigned int n)
144{
145 printf("%.*s", n, s);
146}
diff --git a/arch/unicore32/Kconfig.debug b/arch/unicore32/Kconfig.debug
index 3140151ede45..ae2ec334c3c6 100644
--- a/arch/unicore32/Kconfig.debug
+++ b/arch/unicore32/Kconfig.debug
@@ -27,13 +27,6 @@ config EARLY_PRINTK
27 with klogd/syslogd or the X server. You should normally N here, 27 with klogd/syslogd or the X server. You should normally N here,
28 unless you want to debug such a crash. 28 unless you want to debug such a crash.
29 29
30config DEBUG_STACK_USAGE
31 bool "Enable stack utilization instrumentation"
32 depends on DEBUG_KERNEL
33 help
34 Enables the display of the minimum amount of free stack which each
35 task has ever had available in the sysrq-T output.
36
37# These options are only for real kernel hackers who want to get their hands dirty. 30# These options are only for real kernel hackers who want to get their hands dirty.
38config DEBUG_LL 31config DEBUG_LL
39 bool "Kernel low-level debugging functions" 32 bool "Kernel low-level debugging functions"
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
index 1fc02633f700..2d3e7112d2a3 100644
--- a/arch/unicore32/mm/init.c
+++ b/arch/unicore32/mm/init.c
@@ -62,7 +62,7 @@ void show_mem(unsigned int filter)
62 struct meminfo *mi = &meminfo; 62 struct meminfo *mi = &meminfo;
63 63
64 printk(KERN_DEFAULT "Mem-info:\n"); 64 printk(KERN_DEFAULT "Mem-info:\n");
65 show_free_areas(); 65 show_free_areas(filter);
66 66
67 for_each_bank(i, mi) { 67 for_each_bank(i, mi) {
68 struct membank *bank = &mi->bank[i]; 68 struct membank *bank = &mi->bank[i];
diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c
index db2d334941b4..3e5c3e5a0b45 100644
--- a/arch/unicore32/mm/mmu.c
+++ b/arch/unicore32/mm/mmu.c
@@ -30,8 +30,6 @@
30 30
31#include "mm.h" 31#include "mm.h"
32 32
33DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
34
35/* 33/*
36 * empty_zero_page is a special page that is used for 34 * empty_zero_page is a special page that is used for
37 * zero-initialized data and COW. 35 * zero-initialized data and COW.
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index dcf25b71f6d7..483775f42d2a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -17,8 +17,6 @@ config X86_64
17config X86 17config X86
18 def_bool y 18 def_bool y
19 select HAVE_AOUT if X86_32 19 select HAVE_AOUT if X86_32
20 select HAVE_READQ
21 select HAVE_WRITEQ
22 select HAVE_UNSTABLE_SCHED_CLOCK 20 select HAVE_UNSTABLE_SCHED_CLOCK
23 select HAVE_IDE 21 select HAVE_IDE
24 select HAVE_OPROFILE 22 select HAVE_OPROFILE
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 615e18810f48..c0f8a5c88910 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -66,26 +66,6 @@ config DEBUG_STACKOVERFLOW
66 This option will cause messages to be printed if free stack space 66 This option will cause messages to be printed if free stack space
67 drops below a certain limit. 67 drops below a certain limit.
68 68
69config DEBUG_STACK_USAGE
70 bool "Stack utilization instrumentation"
71 depends on DEBUG_KERNEL
72 ---help---
73 Enables the display of the minimum amount of free stack which each
74 task has ever had available in the sysrq-T and sysrq-P debug output.
75
76 This option will slow down process creation somewhat.
77
78config DEBUG_PER_CPU_MAPS
79 bool "Debug access to per_cpu maps"
80 depends on DEBUG_KERNEL
81 depends on SMP
82 ---help---
83 Say Y to verify that the per_cpu map being accessed has
84 been setup. Adds a fair amount of code to kernel memory
85 and decreases performance.
86
87 Say N if unsure.
88
89config X86_PTDUMP 69config X86_PTDUMP
90 bool "Export kernel pagetable layout to userspace via debugfs" 70 bool "Export kernel pagetable layout to userspace via debugfs"
91 depends on DEBUG_KERNEL 71 depends on DEBUG_KERNEL
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 072273082528..d02804d650c4 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -38,7 +38,6 @@
38 38
39#include <linux/string.h> 39#include <linux/string.h>
40#include <linux/compiler.h> 40#include <linux/compiler.h>
41#include <asm-generic/int-ll64.h>
42#include <asm/page.h> 41#include <asm/page.h>
43 42
44#include <xen/xen.h> 43#include <xen/xen.h>
@@ -87,27 +86,6 @@ build_mmio_write(__writel, "l", unsigned int, "r", )
87build_mmio_read(readq, "q", unsigned long, "=r", :"memory") 86build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
88build_mmio_write(writeq, "q", unsigned long, "r", :"memory") 87build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
89 88
90#else
91
92static inline __u64 readq(const volatile void __iomem *addr)
93{
94 const volatile u32 __iomem *p = addr;
95 u32 low, high;
96
97 low = readl(p);
98 high = readl(p + 1);
99
100 return low + ((u64)high << 32);
101}
102
103static inline void writeq(__u64 val, volatile void __iomem *addr)
104{
105 writel(val, addr);
106 writel(val >> 32, addr+4);
107}
108
109#endif
110
111#define readq_relaxed(a) readq(a) 89#define readq_relaxed(a) readq(a)
112 90
113#define __raw_readq(a) readq(a) 91#define __raw_readq(a) readq(a)
@@ -117,6 +95,8 @@ static inline void writeq(__u64 val, volatile void __iomem *addr)
117#define readq readq 95#define readq readq
118#define writeq writeq 96#define writeq writeq
119 97
98#endif
99
120/** 100/**
121 * virt_to_phys - map virtual addresses to physical 101 * virt_to_phys - map virtual addresses to physical
122 * @address: address to remap 102 * @address: address to remap
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 605e5ae19c7f..a3e5948670c2 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -946,6 +946,8 @@ void __init setup_arch(char **cmdline_p)
946 if (init_ohci1394_dma_early) 946 if (init_ohci1394_dma_early)
947 init_ohci1394_dma_on_all_controllers(); 947 init_ohci1394_dma_on_all_controllers();
948#endif 948#endif
949 /* Allocate bigger log buffer */
950 setup_log_buf(1);
949 951
950 reserve_initrd(); 952 reserve_initrd();
951 953
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 998e972f3b1a..30ac65df7d4e 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -110,7 +110,6 @@ static struct mm_struct tboot_mm = {
110 .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem), 110 .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem),
111 .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock), 111 .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
112 .mmlist = LIST_HEAD_INIT(init_mm.mmlist), 112 .mmlist = LIST_HEAD_INIT(init_mm.mmlist),
113 .cpu_vm_mask = CPU_MASK_ALL,
114}; 113};
115 114
116static inline void switch_to_tboot_pt(void) 115static inline void switch_to_tboot_pt(void)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 28418054b880..bd14bb4c8594 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3545,10 +3545,11 @@ static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
3545 return kvm_mmu_prepare_zap_page(kvm, page, invalid_list); 3545 return kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
3546} 3546}
3547 3547
3548static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) 3548static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
3549{ 3549{
3550 struct kvm *kvm; 3550 struct kvm *kvm;
3551 struct kvm *kvm_freed = NULL; 3551 struct kvm *kvm_freed = NULL;
3552 int nr_to_scan = sc->nr_to_scan;
3552 3553
3553 if (nr_to_scan == 0) 3554 if (nr_to_scan == 0)
3554 goto out; 3555 goto out;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index bcb394dfbb35..f7a2a054a3c0 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -965,7 +965,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
965 struct mm_struct *mm; 965 struct mm_struct *mm;
966 int fault; 966 int fault;
967 int write = error_code & PF_WRITE; 967 int write = error_code & PF_WRITE;
968 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | 968 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
969 (write ? FAULT_FLAG_WRITE : 0); 969 (write ? FAULT_FLAG_WRITE : 0);
970 970
971 tsk = current; 971 tsk = current;
@@ -1139,6 +1139,16 @@ good_area:
1139 } 1139 }
1140 1140
1141 /* 1141 /*
1142 * Pagefault was interrupted by SIGKILL. We have no reason to
1143 * continue pagefault.
1144 */
1145 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
1146 if (!(error_code & PF_USER))
1147 no_context(regs, error_code, address);
1148 return;
1149 }
1150
1151 /*
1142 * Major/minor page fault accounting is only done on the 1152 * Major/minor page fault accounting is only done on the
1143 * initial attempt. If we go through a retry, it is extremely 1153 * initial attempt. If we go through a retry, it is extremely
1144 * likely that the page will be found in page cache at that point. 1154 * likely that the page will be found in page cache at that point.
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index d4203988504a..f581a18c0d4d 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -72,7 +72,7 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
72 if (!vma_shareable(vma, addr)) 72 if (!vma_shareable(vma, addr))
73 return; 73 return;
74 74
75 spin_lock(&mapping->i_mmap_lock); 75 mutex_lock(&mapping->i_mmap_mutex);
76 vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) { 76 vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
77 if (svma == vma) 77 if (svma == vma)
78 continue; 78 continue;
@@ -97,7 +97,7 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
97 put_page(virt_to_page(spte)); 97 put_page(virt_to_page(spte));
98 spin_unlock(&mm->page_table_lock); 98 spin_unlock(&mm->page_table_lock);
99out: 99out:
100 spin_unlock(&mapping->i_mmap_lock); 100 mutex_unlock(&mapping->i_mmap_mutex);
101} 101}
102 102
103/* 103/*
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 37b8b0fe8320..30326443ab81 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -16,8 +16,6 @@
16#include <asm/tlb.h> 16#include <asm/tlb.h>
17#include <asm/proto.h> 17#include <asm/proto.h>
18 18
19DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
20
21unsigned long __initdata pgt_buf_start; 19unsigned long __initdata pgt_buf_start;
22unsigned long __meminitdata pgt_buf_end; 20unsigned long __meminitdata pgt_buf_end;
23unsigned long __meminitdata pgt_buf_top; 21unsigned long __meminitdata pgt_buf_top;
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 161bb89e98c8..7a5591a71f85 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -171,10 +171,6 @@ extern void copy_user_page(void*, void*, unsigned long, struct page*);
171#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 171#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
172#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) 172#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
173 173
174#ifdef CONFIG_MMU
175#define WANT_PAGE_VIRTUAL
176#endif
177
178#endif /* __ASSEMBLY__ */ 174#endif /* __ASSEMBLY__ */
179 175
180#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ 176#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 4bb91a970f1f..ca81654f3ec2 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -14,8 +14,6 @@
14#include <asm/mmu_context.h> 14#include <asm/mmu_context.h>
15#include <asm/page.h> 15#include <asm/page.h>
16 16
17DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
18
19void __init paging_init(void) 17void __init paging_init(void)
20{ 18{
21 memset(swapper_pg_dir, 0, PAGE_SIZE); 19 memset(swapper_pg_dir, 0, PAGE_SIZE);
diff --git a/arch/xtensa/mm/pgtable.c b/arch/xtensa/mm/pgtable.c
deleted file mode 100644
index 697992738205..000000000000
--- a/arch/xtensa/mm/pgtable.c
+++ /dev/null
@@ -1,72 +0,0 @@
1/*
2 * arch/xtensa/mm/pgtable.c
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001 - 2005 Tensilica Inc.
9 *
10 * Chris Zankel <chris@zankel.net>
11 */
12
13#if (DCACHE_SIZE > PAGE_SIZE)
14
15pte_t* pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
16{
17 pte_t *pte = NULL, *p;
18 int color = ADDR_COLOR(address);
19 int i;
20
21 p = (pte_t*) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, COLOR_ORDER);
22
23 if (likely(p)) {
24 split_page(virt_to_page(p), COLOR_ORDER);
25
26 for (i = 0; i < COLOR_SIZE; i++) {
27 if (ADDR_COLOR(p) == color)
28 pte = p;
29 else
30 free_page(p);
31 p += PTRS_PER_PTE;
32 }
33 clear_page(pte);
34 }
35 return pte;
36}
37
38#ifdef PROFILING
39
40int mask;
41int hit;
42int flush;
43
44#endif
45
46struct page* pte_alloc_one(struct mm_struct *mm, unsigned long address)
47{
48 struct page *page = NULL, *p;
49 int color = ADDR_COLOR(address);
50
51 p = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
52
53 if (likely(p)) {
54 split_page(p, COLOR_ORDER);
55
56 for (i = 0; i < PAGE_ORDER; i++) {
57 if (PADDR_COLOR(page_address(p)) == color)
58 page = p;
59 else
60 __free_page(p);
61 p++;
62 }
63 clear_highpage(page);
64 }
65
66 return page;
67}
68
69#endif
70
71
72
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 471fdcc5df85..07371cfdfae6 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -385,25 +385,40 @@ void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
385 385
386 spin_lock_irqsave(&blkg->stats_lock, flags); 386 spin_lock_irqsave(&blkg->stats_lock, flags);
387 blkg->stats.time += time; 387 blkg->stats.time += time;
388#ifdef CONFIG_DEBUG_BLK_CGROUP
388 blkg->stats.unaccounted_time += unaccounted_time; 389 blkg->stats.unaccounted_time += unaccounted_time;
390#endif
389 spin_unlock_irqrestore(&blkg->stats_lock, flags); 391 spin_unlock_irqrestore(&blkg->stats_lock, flags);
390} 392}
391EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used); 393EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
392 394
395/*
396 * should be called under rcu read lock or queue lock to make sure blkg pointer
397 * is valid.
398 */
393void blkiocg_update_dispatch_stats(struct blkio_group *blkg, 399void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
394 uint64_t bytes, bool direction, bool sync) 400 uint64_t bytes, bool direction, bool sync)
395{ 401{
396 struct blkio_group_stats *stats; 402 struct blkio_group_stats_cpu *stats_cpu;
397 unsigned long flags; 403 unsigned long flags;
398 404
399 spin_lock_irqsave(&blkg->stats_lock, flags); 405 /*
400 stats = &blkg->stats; 406 * Disabling interrupts to provide mutual exclusion between two
401 stats->sectors += bytes >> 9; 407 * writes on same cpu. It probably is not needed for 64bit. Not
402 blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction, 408 * optimizing that case yet.
403 sync); 409 */
404 blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes, 410 local_irq_save(flags);
405 direction, sync); 411
406 spin_unlock_irqrestore(&blkg->stats_lock, flags); 412 stats_cpu = this_cpu_ptr(blkg->stats_cpu);
413
414 u64_stats_update_begin(&stats_cpu->syncp);
415 stats_cpu->sectors += bytes >> 9;
416 blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
417 1, direction, sync);
418 blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
419 bytes, direction, sync);
420 u64_stats_update_end(&stats_cpu->syncp);
421 local_irq_restore(flags);
407} 422}
408EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats); 423EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
409 424
@@ -426,18 +441,44 @@ void blkiocg_update_completion_stats(struct blkio_group *blkg,
426} 441}
427EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats); 442EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
428 443
444/* Merged stats are per cpu. */
429void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction, 445void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
430 bool sync) 446 bool sync)
431{ 447{
448 struct blkio_group_stats_cpu *stats_cpu;
432 unsigned long flags; 449 unsigned long flags;
433 450
434 spin_lock_irqsave(&blkg->stats_lock, flags); 451 /*
435 blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction, 452 * Disabling interrupts to provide mutual exclusion between two
436 sync); 453 * writes on same cpu. It probably is not needed for 64bit. Not
437 spin_unlock_irqrestore(&blkg->stats_lock, flags); 454 * optimizing that case yet.
455 */
456 local_irq_save(flags);
457
458 stats_cpu = this_cpu_ptr(blkg->stats_cpu);
459
460 u64_stats_update_begin(&stats_cpu->syncp);
461 blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
462 direction, sync);
463 u64_stats_update_end(&stats_cpu->syncp);
464 local_irq_restore(flags);
438} 465}
439EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats); 466EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
440 467
468/*
469 * This function allocates the per cpu stats for blkio_group. Should be called
470 * from sleepable context as alloc_per_cpu() requires that.
471 */
472int blkio_alloc_blkg_stats(struct blkio_group *blkg)
473{
474 /* Allocate memory for per cpu stats */
475 blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
476 if (!blkg->stats_cpu)
477 return -ENOMEM;
478 return 0;
479}
480EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);
481
441void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, 482void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
442 struct blkio_group *blkg, void *key, dev_t dev, 483 struct blkio_group *blkg, void *key, dev_t dev,
443 enum blkio_policy_id plid) 484 enum blkio_policy_id plid)
@@ -508,6 +549,30 @@ struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
508} 549}
509EXPORT_SYMBOL_GPL(blkiocg_lookup_group); 550EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
510 551
552static void blkio_reset_stats_cpu(struct blkio_group *blkg)
553{
554 struct blkio_group_stats_cpu *stats_cpu;
555 int i, j, k;
556 /*
557 * Note: On 64 bit arch this should not be an issue. This has the
558 * possibility of returning some inconsistent value on 32bit arch
559 * as 64bit update on 32bit is non atomic. Taking care of this
560 * corner case makes code very complicated, like sending IPIs to
561 * cpus, taking care of stats of offline cpus etc.
562 *
563 * reset stats is anyway more of a debug feature and this sounds a
564 * corner case. So I am not complicating the code yet until and
565 * unless this becomes a real issue.
566 */
567 for_each_possible_cpu(i) {
568 stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
569 stats_cpu->sectors = 0;
570 for(j = 0; j < BLKIO_STAT_CPU_NR; j++)
571 for (k = 0; k < BLKIO_STAT_TOTAL; k++)
572 stats_cpu->stat_arr_cpu[j][k] = 0;
573 }
574}
575
511static int 576static int
512blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val) 577blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
513{ 578{
@@ -552,7 +617,11 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
552 } 617 }
553#endif 618#endif
554 spin_unlock(&blkg->stats_lock); 619 spin_unlock(&blkg->stats_lock);
620
621 /* Reset Per cpu stats which don't take blkg->stats_lock */
622 blkio_reset_stats_cpu(blkg);
555 } 623 }
624
556 spin_unlock_irq(&blkcg->lock); 625 spin_unlock_irq(&blkcg->lock);
557 return 0; 626 return 0;
558} 627}
@@ -598,6 +667,59 @@ static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
598 return val; 667 return val;
599} 668}
600 669
670
671static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
672 enum stat_type_cpu type, enum stat_sub_type sub_type)
673{
674 int cpu;
675 struct blkio_group_stats_cpu *stats_cpu;
676 u64 val = 0, tval;
677
678 for_each_possible_cpu(cpu) {
679 unsigned int start;
680 stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);
681
682 do {
683 start = u64_stats_fetch_begin(&stats_cpu->syncp);
684 if (type == BLKIO_STAT_CPU_SECTORS)
685 tval = stats_cpu->sectors;
686 else
687 tval = stats_cpu->stat_arr_cpu[type][sub_type];
688 } while(u64_stats_fetch_retry(&stats_cpu->syncp, start));
689
690 val += tval;
691 }
692
693 return val;
694}
695
696static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
697 struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
698{
699 uint64_t disk_total, val;
700 char key_str[MAX_KEY_LEN];
701 enum stat_sub_type sub_type;
702
703 if (type == BLKIO_STAT_CPU_SECTORS) {
704 val = blkio_read_stat_cpu(blkg, type, 0);
705 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
706 }
707
708 for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
709 sub_type++) {
710 blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
711 val = blkio_read_stat_cpu(blkg, type, sub_type);
712 cb->fill(cb, key_str, val);
713 }
714
715 disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
716 blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);
717
718 blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
719 cb->fill(cb, key_str, disk_total);
720 return disk_total;
721}
722
601/* This should be called with blkg->stats_lock held */ 723/* This should be called with blkg->stats_lock held */
602static uint64_t blkio_get_stat(struct blkio_group *blkg, 724static uint64_t blkio_get_stat(struct blkio_group *blkg,
603 struct cgroup_map_cb *cb, dev_t dev, enum stat_type type) 725 struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
@@ -609,9 +731,6 @@ static uint64_t blkio_get_stat(struct blkio_group *blkg,
609 if (type == BLKIO_STAT_TIME) 731 if (type == BLKIO_STAT_TIME)
610 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, 732 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
611 blkg->stats.time, cb, dev); 733 blkg->stats.time, cb, dev);
612 if (type == BLKIO_STAT_SECTORS)
613 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
614 blkg->stats.sectors, cb, dev);
615#ifdef CONFIG_DEBUG_BLK_CGROUP 734#ifdef CONFIG_DEBUG_BLK_CGROUP
616 if (type == BLKIO_STAT_UNACCOUNTED_TIME) 735 if (type == BLKIO_STAT_UNACCOUNTED_TIME)
617 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, 736 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
@@ -1075,8 +1194,8 @@ static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
1075} 1194}
1076 1195
1077static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg, 1196static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
1078 struct cftype *cft, struct cgroup_map_cb *cb, enum stat_type type, 1197 struct cftype *cft, struct cgroup_map_cb *cb,
1079 bool show_total) 1198 enum stat_type type, bool show_total, bool pcpu)
1080{ 1199{
1081 struct blkio_group *blkg; 1200 struct blkio_group *blkg;
1082 struct hlist_node *n; 1201 struct hlist_node *n;
@@ -1087,10 +1206,15 @@ static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
1087 if (blkg->dev) { 1206 if (blkg->dev) {
1088 if (!cftype_blkg_same_policy(cft, blkg)) 1207 if (!cftype_blkg_same_policy(cft, blkg))
1089 continue; 1208 continue;
1090 spin_lock_irq(&blkg->stats_lock); 1209 if (pcpu)
1091 cgroup_total += blkio_get_stat(blkg, cb, blkg->dev, 1210 cgroup_total += blkio_get_stat_cpu(blkg, cb,
1092 type); 1211 blkg->dev, type);
1093 spin_unlock_irq(&blkg->stats_lock); 1212 else {
1213 spin_lock_irq(&blkg->stats_lock);
1214 cgroup_total += blkio_get_stat(blkg, cb,
1215 blkg->dev, type);
1216 spin_unlock_irq(&blkg->stats_lock);
1217 }
1094 } 1218 }
1095 } 1219 }
1096 if (show_total) 1220 if (show_total)
@@ -1114,47 +1238,47 @@ static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
1114 switch(name) { 1238 switch(name) {
1115 case BLKIO_PROP_time: 1239 case BLKIO_PROP_time:
1116 return blkio_read_blkg_stats(blkcg, cft, cb, 1240 return blkio_read_blkg_stats(blkcg, cft, cb,
1117 BLKIO_STAT_TIME, 0); 1241 BLKIO_STAT_TIME, 0, 0);
1118 case BLKIO_PROP_sectors: 1242 case BLKIO_PROP_sectors:
1119 return blkio_read_blkg_stats(blkcg, cft, cb, 1243 return blkio_read_blkg_stats(blkcg, cft, cb,
1120 BLKIO_STAT_SECTORS, 0); 1244 BLKIO_STAT_CPU_SECTORS, 0, 1);
1121 case BLKIO_PROP_io_service_bytes: 1245 case BLKIO_PROP_io_service_bytes:
1122 return blkio_read_blkg_stats(blkcg, cft, cb, 1246 return blkio_read_blkg_stats(blkcg, cft, cb,
1123 BLKIO_STAT_SERVICE_BYTES, 1); 1247 BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
1124 case BLKIO_PROP_io_serviced: 1248 case BLKIO_PROP_io_serviced:
1125 return blkio_read_blkg_stats(blkcg, cft, cb, 1249 return blkio_read_blkg_stats(blkcg, cft, cb,
1126 BLKIO_STAT_SERVICED, 1); 1250 BLKIO_STAT_CPU_SERVICED, 1, 1);
1127 case BLKIO_PROP_io_service_time: 1251 case BLKIO_PROP_io_service_time:
1128 return blkio_read_blkg_stats(blkcg, cft, cb, 1252 return blkio_read_blkg_stats(blkcg, cft, cb,
1129 BLKIO_STAT_SERVICE_TIME, 1); 1253 BLKIO_STAT_SERVICE_TIME, 1, 0);
1130 case BLKIO_PROP_io_wait_time: 1254 case BLKIO_PROP_io_wait_time:
1131 return blkio_read_blkg_stats(blkcg, cft, cb, 1255 return blkio_read_blkg_stats(blkcg, cft, cb,
1132 BLKIO_STAT_WAIT_TIME, 1); 1256 BLKIO_STAT_WAIT_TIME, 1, 0);
1133 case BLKIO_PROP_io_merged: 1257 case BLKIO_PROP_io_merged:
1134 return blkio_read_blkg_stats(blkcg, cft, cb, 1258 return blkio_read_blkg_stats(blkcg, cft, cb,
1135 BLKIO_STAT_MERGED, 1); 1259 BLKIO_STAT_CPU_MERGED, 1, 1);
1136 case BLKIO_PROP_io_queued: 1260 case BLKIO_PROP_io_queued:
1137 return blkio_read_blkg_stats(blkcg, cft, cb, 1261 return blkio_read_blkg_stats(blkcg, cft, cb,
1138 BLKIO_STAT_QUEUED, 1); 1262 BLKIO_STAT_QUEUED, 1, 0);
1139#ifdef CONFIG_DEBUG_BLK_CGROUP 1263#ifdef CONFIG_DEBUG_BLK_CGROUP
1140 case BLKIO_PROP_unaccounted_time: 1264 case BLKIO_PROP_unaccounted_time:
1141 return blkio_read_blkg_stats(blkcg, cft, cb, 1265 return blkio_read_blkg_stats(blkcg, cft, cb,
1142 BLKIO_STAT_UNACCOUNTED_TIME, 0); 1266 BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
1143 case BLKIO_PROP_dequeue: 1267 case BLKIO_PROP_dequeue:
1144 return blkio_read_blkg_stats(blkcg, cft, cb, 1268 return blkio_read_blkg_stats(blkcg, cft, cb,
1145 BLKIO_STAT_DEQUEUE, 0); 1269 BLKIO_STAT_DEQUEUE, 0, 0);
1146 case BLKIO_PROP_avg_queue_size: 1270 case BLKIO_PROP_avg_queue_size:
1147 return blkio_read_blkg_stats(blkcg, cft, cb, 1271 return blkio_read_blkg_stats(blkcg, cft, cb,
1148 BLKIO_STAT_AVG_QUEUE_SIZE, 0); 1272 BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
1149 case BLKIO_PROP_group_wait_time: 1273 case BLKIO_PROP_group_wait_time:
1150 return blkio_read_blkg_stats(blkcg, cft, cb, 1274 return blkio_read_blkg_stats(blkcg, cft, cb,
1151 BLKIO_STAT_GROUP_WAIT_TIME, 0); 1275 BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
1152 case BLKIO_PROP_idle_time: 1276 case BLKIO_PROP_idle_time:
1153 return blkio_read_blkg_stats(blkcg, cft, cb, 1277 return blkio_read_blkg_stats(blkcg, cft, cb,
1154 BLKIO_STAT_IDLE_TIME, 0); 1278 BLKIO_STAT_IDLE_TIME, 0, 0);
1155 case BLKIO_PROP_empty_time: 1279 case BLKIO_PROP_empty_time:
1156 return blkio_read_blkg_stats(blkcg, cft, cb, 1280 return blkio_read_blkg_stats(blkcg, cft, cb,
1157 BLKIO_STAT_EMPTY_TIME, 0); 1281 BLKIO_STAT_EMPTY_TIME, 0, 0);
1158#endif 1282#endif
1159 default: 1283 default:
1160 BUG(); 1284 BUG();
@@ -1164,10 +1288,10 @@ static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
1164 switch(name){ 1288 switch(name){
1165 case BLKIO_THROTL_io_service_bytes: 1289 case BLKIO_THROTL_io_service_bytes:
1166 return blkio_read_blkg_stats(blkcg, cft, cb, 1290 return blkio_read_blkg_stats(blkcg, cft, cb,
1167 BLKIO_STAT_SERVICE_BYTES, 1); 1291 BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
1168 case BLKIO_THROTL_io_serviced: 1292 case BLKIO_THROTL_io_serviced:
1169 return blkio_read_blkg_stats(blkcg, cft, cb, 1293 return blkio_read_blkg_stats(blkcg, cft, cb,
1170 BLKIO_STAT_SERVICED, 1); 1294 BLKIO_STAT_CPU_SERVICED, 1, 1);
1171 default: 1295 default:
1172 BUG(); 1296 BUG();
1173 } 1297 }
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index c774930cc206..a71d2904ffb9 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/cgroup.h> 16#include <linux/cgroup.h>
17#include <linux/u64_stats_sync.h>
17 18
18enum blkio_policy_id { 19enum blkio_policy_id {
19 BLKIO_POLICY_PROP = 0, /* Proportional Bandwidth division */ 20 BLKIO_POLICY_PROP = 0, /* Proportional Bandwidth division */
@@ -36,22 +37,15 @@ enum stat_type {
36 * request completion for IOs doen by this cgroup. This may not be 37 * request completion for IOs doen by this cgroup. This may not be
37 * accurate when NCQ is turned on. */ 38 * accurate when NCQ is turned on. */
38 BLKIO_STAT_SERVICE_TIME = 0, 39 BLKIO_STAT_SERVICE_TIME = 0,
39 /* Total bytes transferred */
40 BLKIO_STAT_SERVICE_BYTES,
41 /* Total IOs serviced, post merge */
42 BLKIO_STAT_SERVICED,
43 /* Total time spent waiting in scheduler queue in ns */ 40 /* Total time spent waiting in scheduler queue in ns */
44 BLKIO_STAT_WAIT_TIME, 41 BLKIO_STAT_WAIT_TIME,
45 /* Number of IOs merged */
46 BLKIO_STAT_MERGED,
47 /* Number of IOs queued up */ 42 /* Number of IOs queued up */
48 BLKIO_STAT_QUEUED, 43 BLKIO_STAT_QUEUED,
49 /* All the single valued stats go below this */ 44 /* All the single valued stats go below this */
50 BLKIO_STAT_TIME, 45 BLKIO_STAT_TIME,
51 BLKIO_STAT_SECTORS, 46#ifdef CONFIG_DEBUG_BLK_CGROUP
52 /* Time not charged to this cgroup */ 47 /* Time not charged to this cgroup */
53 BLKIO_STAT_UNACCOUNTED_TIME, 48 BLKIO_STAT_UNACCOUNTED_TIME,
54#ifdef CONFIG_DEBUG_BLK_CGROUP
55 BLKIO_STAT_AVG_QUEUE_SIZE, 49 BLKIO_STAT_AVG_QUEUE_SIZE,
56 BLKIO_STAT_IDLE_TIME, 50 BLKIO_STAT_IDLE_TIME,
57 BLKIO_STAT_EMPTY_TIME, 51 BLKIO_STAT_EMPTY_TIME,
@@ -60,6 +54,18 @@ enum stat_type {
60#endif 54#endif
61}; 55};
62 56
57/* Per cpu stats */
58enum stat_type_cpu {
59 BLKIO_STAT_CPU_SECTORS,
60 /* Total bytes transferred */
61 BLKIO_STAT_CPU_SERVICE_BYTES,
62 /* Total IOs serviced, post merge */
63 BLKIO_STAT_CPU_SERVICED,
64 /* Number of IOs merged */
65 BLKIO_STAT_CPU_MERGED,
66 BLKIO_STAT_CPU_NR
67};
68
63enum stat_sub_type { 69enum stat_sub_type {
64 BLKIO_STAT_READ = 0, 70 BLKIO_STAT_READ = 0,
65 BLKIO_STAT_WRITE, 71 BLKIO_STAT_WRITE,
@@ -116,11 +122,11 @@ struct blkio_cgroup {
116struct blkio_group_stats { 122struct blkio_group_stats {
117 /* total disk time and nr sectors dispatched by this group */ 123 /* total disk time and nr sectors dispatched by this group */
118 uint64_t time; 124 uint64_t time;
119 uint64_t sectors;
120 /* Time not charged to this cgroup */
121 uint64_t unaccounted_time;
122 uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL]; 125 uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
123#ifdef CONFIG_DEBUG_BLK_CGROUP 126#ifdef CONFIG_DEBUG_BLK_CGROUP
127 /* Time not charged to this cgroup */
128 uint64_t unaccounted_time;
129
124 /* Sum of number of IOs queued across all samples */ 130 /* Sum of number of IOs queued across all samples */
125 uint64_t avg_queue_size_sum; 131 uint64_t avg_queue_size_sum;
126 /* Count of samples taken for average */ 132 /* Count of samples taken for average */
@@ -145,6 +151,13 @@ struct blkio_group_stats {
145#endif 151#endif
146}; 152};
147 153
154/* Per cpu blkio group stats */
155struct blkio_group_stats_cpu {
156 uint64_t sectors;
157 uint64_t stat_arr_cpu[BLKIO_STAT_CPU_NR][BLKIO_STAT_TOTAL];
158 struct u64_stats_sync syncp;
159};
160
148struct blkio_group { 161struct blkio_group {
149 /* An rcu protected unique identifier for the group */ 162 /* An rcu protected unique identifier for the group */
150 void *key; 163 void *key;
@@ -160,6 +173,8 @@ struct blkio_group {
160 /* Need to serialize the stats in the case of reset/update */ 173 /* Need to serialize the stats in the case of reset/update */
161 spinlock_t stats_lock; 174 spinlock_t stats_lock;
162 struct blkio_group_stats stats; 175 struct blkio_group_stats stats;
176 /* Per cpu stats pointer */
177 struct blkio_group_stats_cpu __percpu *stats_cpu;
163}; 178};
164 179
165struct blkio_policy_node { 180struct blkio_policy_node {
@@ -295,6 +310,7 @@ extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
295extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, 310extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
296 struct blkio_group *blkg, void *key, dev_t dev, 311 struct blkio_group *blkg, void *key, dev_t dev,
297 enum blkio_policy_id plid); 312 enum blkio_policy_id plid);
313extern int blkio_alloc_blkg_stats(struct blkio_group *blkg);
298extern int blkiocg_del_blkio_group(struct blkio_group *blkg); 314extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
299extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, 315extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
300 void *key); 316 void *key);
@@ -322,6 +338,8 @@ static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
322 struct blkio_group *blkg, void *key, dev_t dev, 338 struct blkio_group *blkg, void *key, dev_t dev,
323 enum blkio_policy_id plid) {} 339 enum blkio_policy_id plid) {}
324 340
341static inline int blkio_alloc_blkg_stats(struct blkio_group *blkg) { return 0; }
342
325static inline int 343static inline int
326blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; } 344blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
327 345
diff --git a/block/blk-core.c b/block/blk-core.c
index 3fe00a14822a..c8303e9d919d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -569,8 +569,6 @@ int blk_get_queue(struct request_queue *q)
569 569
570static inline void blk_free_request(struct request_queue *q, struct request *rq) 570static inline void blk_free_request(struct request_queue *q, struct request *rq)
571{ 571{
572 BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
573
574 if (rq->cmd_flags & REQ_ELVPRIV) 572 if (rq->cmd_flags & REQ_ELVPRIV)
575 elv_put_request(q, rq); 573 elv_put_request(q, rq);
576 mempool_free(rq, q->rq.rq_pool); 574 mempool_free(rq, q->rq.rq_pool);
@@ -1110,14 +1108,6 @@ static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1110{ 1108{
1111 const int ff = bio->bi_rw & REQ_FAILFAST_MASK; 1109 const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1112 1110
1113 /*
1114 * Debug stuff, kill later
1115 */
1116 if (!rq_mergeable(req)) {
1117 blk_dump_rq_flags(req, "back");
1118 return false;
1119 }
1120
1121 if (!ll_back_merge_fn(q, req, bio)) 1111 if (!ll_back_merge_fn(q, req, bio))
1122 return false; 1112 return false;
1123 1113
@@ -1132,6 +1122,7 @@ static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1132 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1122 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1133 1123
1134 drive_stat_acct(req, 0); 1124 drive_stat_acct(req, 0);
1125 elv_bio_merged(q, req, bio);
1135 return true; 1126 return true;
1136} 1127}
1137 1128
@@ -1141,14 +1132,6 @@ static bool bio_attempt_front_merge(struct request_queue *q,
1141 const int ff = bio->bi_rw & REQ_FAILFAST_MASK; 1132 const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1142 sector_t sector; 1133 sector_t sector;
1143 1134
1144 /*
1145 * Debug stuff, kill later
1146 */
1147 if (!rq_mergeable(req)) {
1148 blk_dump_rq_flags(req, "front");
1149 return false;
1150 }
1151
1152 if (!ll_front_merge_fn(q, req, bio)) 1135 if (!ll_front_merge_fn(q, req, bio))
1153 return false; 1136 return false;
1154 1137
@@ -1173,6 +1156,7 @@ static bool bio_attempt_front_merge(struct request_queue *q,
1173 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1156 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1174 1157
1175 drive_stat_acct(req, 0); 1158 drive_stat_acct(req, 0);
1159 elv_bio_merged(q, req, bio);
1176 return true; 1160 return true;
1177} 1161}
1178 1162
@@ -1258,14 +1242,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
1258 1242
1259 el_ret = elv_merge(q, &req, bio); 1243 el_ret = elv_merge(q, &req, bio);
1260 if (el_ret == ELEVATOR_BACK_MERGE) { 1244 if (el_ret == ELEVATOR_BACK_MERGE) {
1261 BUG_ON(req->cmd_flags & REQ_ON_PLUG);
1262 if (bio_attempt_back_merge(q, req, bio)) { 1245 if (bio_attempt_back_merge(q, req, bio)) {
1263 if (!attempt_back_merge(q, req)) 1246 if (!attempt_back_merge(q, req))
1264 elv_merged_request(q, req, el_ret); 1247 elv_merged_request(q, req, el_ret);
1265 goto out_unlock; 1248 goto out_unlock;
1266 } 1249 }
1267 } else if (el_ret == ELEVATOR_FRONT_MERGE) { 1250 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
1268 BUG_ON(req->cmd_flags & REQ_ON_PLUG);
1269 if (bio_attempt_front_merge(q, req, bio)) { 1251 if (bio_attempt_front_merge(q, req, bio)) {
1270 if (!attempt_front_merge(q, req)) 1252 if (!attempt_front_merge(q, req))
1271 elv_merged_request(q, req, el_ret); 1253 elv_merged_request(q, req, el_ret);
@@ -1320,10 +1302,6 @@ get_rq:
1320 if (__rq->q != q) 1302 if (__rq->q != q)
1321 plug->should_sort = 1; 1303 plug->should_sort = 1;
1322 } 1304 }
1323 /*
1324 * Debug flag, kill later
1325 */
1326 req->cmd_flags |= REQ_ON_PLUG;
1327 list_add_tail(&req->queuelist, &plug->list); 1305 list_add_tail(&req->queuelist, &plug->list);
1328 drive_stat_acct(req, 1); 1306 drive_stat_acct(req, 1);
1329 } else { 1307 } else {
@@ -1550,7 +1528,8 @@ static inline void __generic_make_request(struct bio *bio)
1550 goto end_io; 1528 goto end_io;
1551 } 1529 }
1552 1530
1553 blk_throtl_bio(q, &bio); 1531 if (blk_throtl_bio(q, &bio))
1532 goto end_io;
1554 1533
1555 /* 1534 /*
1556 * If bio = NULL, bio has been throttled and will be submitted 1535 * If bio = NULL, bio has been throttled and will be submitted
@@ -2748,7 +2727,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2748 while (!list_empty(&list)) { 2727 while (!list_empty(&list)) {
2749 rq = list_entry_rq(list.next); 2728 rq = list_entry_rq(list.next);
2750 list_del_init(&rq->queuelist); 2729 list_del_init(&rq->queuelist);
2751 BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
2752 BUG_ON(!rq->q); 2730 BUG_ON(!rq->q);
2753 if (rq->q != q) { 2731 if (rq->q != q) {
2754 /* 2732 /*
@@ -2760,8 +2738,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2760 depth = 0; 2738 depth = 0;
2761 spin_lock(q->queue_lock); 2739 spin_lock(q->queue_lock);
2762 } 2740 }
2763 rq->cmd_flags &= ~REQ_ON_PLUG;
2764
2765 /* 2741 /*
2766 * rq is already accounted, so use raw insert 2742 * rq is already accounted, so use raw insert
2767 */ 2743 */
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 81e31819a597..8a0e7ec056e7 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -56,7 +56,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
56 spin_lock_irq(q->queue_lock); 56 spin_lock_irq(q->queue_lock);
57 __elv_add_request(q, rq, where); 57 __elv_add_request(q, rq, where);
58 __blk_run_queue(q); 58 __blk_run_queue(q);
59 /* the queue is stopped so it won't be plugged+unplugged */ 59 /* the queue is stopped so it won't be run */
60 if (rq->cmd_type == REQ_TYPE_PM_RESUME) 60 if (rq->cmd_type == REQ_TYPE_PM_RESUME)
61 q->request_fn(q); 61 q->request_fn(q);
62 spin_unlock_irq(q->queue_lock); 62 spin_unlock_irq(q->queue_lock);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 6c9b5e189e62..bb21e4c36f70 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -212,13 +212,19 @@ static void flush_end_io(struct request *flush_rq, int error)
212 } 212 }
213 213
214 /* 214 /*
215 * Moving a request silently to empty queue_head may stall the 215 * Kick the queue to avoid stall for two cases:
216 * queue. Kick the queue in those cases. This function is called 216 * 1. Moving a request silently to empty queue_head may stall the
217 * from request completion path and calling directly into 217 * queue.
218 * request_fn may confuse the driver. Always use kblockd. 218 * 2. When flush request is running in non-queueable queue, the
219 * queue is hold. Restart the queue after flush request is finished
220 * to avoid stall.
221 * This function is called from request completion path and calling
222 * directly into request_fn may confuse the driver. Always use
223 * kblockd.
219 */ 224 */
220 if (queued) 225 if (queued || q->flush_queue_delayed)
221 blk_run_queue_async(q); 226 blk_run_queue_async(q);
227 q->flush_queue_delayed = 0;
222} 228}
223 229
224/** 230/**
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index b791022beef3..c898049dafd5 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -96,6 +96,9 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
96 INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH); 96 INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
97 INIT_HLIST_HEAD(&ret->cic_list); 97 INIT_HLIST_HEAD(&ret->cic_list);
98 ret->ioc_data = NULL; 98 ret->ioc_data = NULL;
99#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
100 ret->cgroup_changed = 0;
101#endif
99 } 102 }
100 103
101 return ret; 104 return ret;
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 25de73e4759b..78e627e2581d 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -9,17 +9,20 @@
9 9
10#include "blk.h" 10#include "blk.h"
11 11
12static void blkdev_discard_end_io(struct bio *bio, int err) 12struct bio_batch {
13{ 13 atomic_t done;
14 if (err) { 14 unsigned long flags;
15 if (err == -EOPNOTSUPP) 15 struct completion *wait;
16 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); 16};
17 clear_bit(BIO_UPTODATE, &bio->bi_flags);
18 }
19 17
20 if (bio->bi_private) 18static void bio_batch_end_io(struct bio *bio, int err)
21 complete(bio->bi_private); 19{
20 struct bio_batch *bb = bio->bi_private;
22 21
22 if (err && (err != -EOPNOTSUPP))
23 clear_bit(BIO_UPTODATE, &bb->flags);
24 if (atomic_dec_and_test(&bb->done))
25 complete(bb->wait);
23 bio_put(bio); 26 bio_put(bio);
24} 27}
25 28
@@ -41,6 +44,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
41 struct request_queue *q = bdev_get_queue(bdev); 44 struct request_queue *q = bdev_get_queue(bdev);
42 int type = REQ_WRITE | REQ_DISCARD; 45 int type = REQ_WRITE | REQ_DISCARD;
43 unsigned int max_discard_sectors; 46 unsigned int max_discard_sectors;
47 struct bio_batch bb;
44 struct bio *bio; 48 struct bio *bio;
45 int ret = 0; 49 int ret = 0;
46 50
@@ -67,7 +71,11 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
67 type |= REQ_SECURE; 71 type |= REQ_SECURE;
68 } 72 }
69 73
70 while (nr_sects && !ret) { 74 atomic_set(&bb.done, 1);
75 bb.flags = 1 << BIO_UPTODATE;
76 bb.wait = &wait;
77
78 while (nr_sects) {
71 bio = bio_alloc(gfp_mask, 1); 79 bio = bio_alloc(gfp_mask, 1);
72 if (!bio) { 80 if (!bio) {
73 ret = -ENOMEM; 81 ret = -ENOMEM;
@@ -75,9 +83,9 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
75 } 83 }
76 84
77 bio->bi_sector = sector; 85 bio->bi_sector = sector;
78 bio->bi_end_io = blkdev_discard_end_io; 86 bio->bi_end_io = bio_batch_end_io;
79 bio->bi_bdev = bdev; 87 bio->bi_bdev = bdev;
80 bio->bi_private = &wait; 88 bio->bi_private = &bb;
81 89
82 if (nr_sects > max_discard_sectors) { 90 if (nr_sects > max_discard_sectors) {
83 bio->bi_size = max_discard_sectors << 9; 91 bio->bi_size = max_discard_sectors << 9;
@@ -88,45 +96,21 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
88 nr_sects = 0; 96 nr_sects = 0;
89 } 97 }
90 98
91 bio_get(bio); 99 atomic_inc(&bb.done);
92 submit_bio(type, bio); 100 submit_bio(type, bio);
101 }
93 102
103 /* Wait for bios in-flight */
104 if (!atomic_dec_and_test(&bb.done))
94 wait_for_completion(&wait); 105 wait_for_completion(&wait);
95 106
96 if (bio_flagged(bio, BIO_EOPNOTSUPP)) 107 if (!test_bit(BIO_UPTODATE, &bb.flags))
97 ret = -EOPNOTSUPP; 108 ret = -EIO;
98 else if (!bio_flagged(bio, BIO_UPTODATE))
99 ret = -EIO;
100 bio_put(bio);
101 }
102 109
103 return ret; 110 return ret;
104} 111}
105EXPORT_SYMBOL(blkdev_issue_discard); 112EXPORT_SYMBOL(blkdev_issue_discard);
106 113
107struct bio_batch
108{
109 atomic_t done;
110 unsigned long flags;
111 struct completion *wait;
112};
113
114static void bio_batch_end_io(struct bio *bio, int err)
115{
116 struct bio_batch *bb = bio->bi_private;
117
118 if (err) {
119 if (err == -EOPNOTSUPP)
120 set_bit(BIO_EOPNOTSUPP, &bb->flags);
121 else
122 clear_bit(BIO_UPTODATE, &bb->flags);
123 }
124 if (bb)
125 if (atomic_dec_and_test(&bb->done))
126 complete(bb->wait);
127 bio_put(bio);
128}
129
130/** 114/**
131 * blkdev_issue_zeroout - generate number of zero filed write bios 115 * blkdev_issue_zeroout - generate number of zero filed write bios
132 * @bdev: blockdev to issue 116 * @bdev: blockdev to issue
@@ -151,7 +135,6 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
151 bb.flags = 1 << BIO_UPTODATE; 135 bb.flags = 1 << BIO_UPTODATE;
152 bb.wait = &wait; 136 bb.wait = &wait;
153 137
154submit:
155 ret = 0; 138 ret = 0;
156 while (nr_sects != 0) { 139 while (nr_sects != 0) {
157 bio = bio_alloc(gfp_mask, 140 bio = bio_alloc(gfp_mask,
@@ -168,9 +151,6 @@ submit:
168 151
169 while (nr_sects != 0) { 152 while (nr_sects != 0) {
170 sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects); 153 sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
171 if (sz == 0)
172 /* bio has maximum size possible */
173 break;
174 ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0); 154 ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
175 nr_sects -= ret >> 9; 155 nr_sects -= ret >> 9;
176 sector += ret >> 9; 156 sector += ret >> 9;
@@ -190,16 +170,6 @@ submit:
190 /* One of bios in the batch was completed with error.*/ 170 /* One of bios in the batch was completed with error.*/
191 ret = -EIO; 171 ret = -EIO;
192 172
193 if (ret)
194 goto out;
195
196 if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
197 ret = -EOPNOTSUPP;
198 goto out;
199 }
200 if (nr_sects != 0)
201 goto submit;
202out:
203 return ret; 173 return ret;
204} 174}
205EXPORT_SYMBOL(blkdev_issue_zeroout); 175EXPORT_SYMBOL(blkdev_issue_zeroout);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 1fa769293597..fa1eb0449a05 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -120,7 +120,7 @@ void blk_set_default_limits(struct queue_limits *lim)
120 lim->discard_granularity = 0; 120 lim->discard_granularity = 0;
121 lim->discard_alignment = 0; 121 lim->discard_alignment = 0;
122 lim->discard_misaligned = 0; 122 lim->discard_misaligned = 0;
123 lim->discard_zeroes_data = -1; 123 lim->discard_zeroes_data = 1;
124 lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; 124 lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
125 lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); 125 lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
126 lim->alignment_offset = 0; 126 lim->alignment_offset = 0;
@@ -166,6 +166,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
166 166
167 blk_set_default_limits(&q->limits); 167 blk_set_default_limits(&q->limits);
168 blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS); 168 blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
169 q->limits.discard_zeroes_data = 0;
169 170
170 /* 171 /*
171 * by default assume old behaviour and bounce for any highmem page 172 * by default assume old behaviour and bounce for any highmem page
@@ -790,6 +791,12 @@ void blk_queue_flush(struct request_queue *q, unsigned int flush)
790} 791}
791EXPORT_SYMBOL_GPL(blk_queue_flush); 792EXPORT_SYMBOL_GPL(blk_queue_flush);
792 793
794void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
795{
796 q->flush_not_queueable = !queueable;
797}
798EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
799
793static int __init blk_settings_init(void) 800static int __init blk_settings_init(void)
794{ 801{
795 blk_max_low_pfn = max_low_pfn - 1; 802 blk_max_low_pfn = max_low_pfn - 1;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index bd236313f35d..d935bd859c87 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -152,7 +152,8 @@ static ssize_t queue_discard_granularity_show(struct request_queue *q, char *pag
152 152
153static ssize_t queue_discard_max_show(struct request_queue *q, char *page) 153static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
154{ 154{
155 return queue_var_show(q->limits.max_discard_sectors << 9, page); 155 return sprintf(page, "%llu\n",
156 (unsigned long long)q->limits.max_discard_sectors << 9);
156} 157}
157 158
158static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page) 159static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 252a81a306f7..a62be8d0dc1b 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -78,6 +78,8 @@ struct throtl_grp {
78 78
79 /* Some throttle limits got updated for the group */ 79 /* Some throttle limits got updated for the group */
80 int limits_changed; 80 int limits_changed;
81
82 struct rcu_head rcu_head;
81}; 83};
82 84
83struct throtl_data 85struct throtl_data
@@ -88,7 +90,7 @@ struct throtl_data
88 /* service tree for active throtl groups */ 90 /* service tree for active throtl groups */
89 struct throtl_rb_root tg_service_tree; 91 struct throtl_rb_root tg_service_tree;
90 92
91 struct throtl_grp root_tg; 93 struct throtl_grp *root_tg;
92 struct request_queue *queue; 94 struct request_queue *queue;
93 95
94 /* Total Number of queued bios on READ and WRITE lists */ 96 /* Total Number of queued bios on READ and WRITE lists */
@@ -151,56 +153,44 @@ static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
151 return tg; 153 return tg;
152} 154}
153 155
154static void throtl_put_tg(struct throtl_grp *tg) 156static void throtl_free_tg(struct rcu_head *head)
155{ 157{
156 BUG_ON(atomic_read(&tg->ref) <= 0); 158 struct throtl_grp *tg;
157 if (!atomic_dec_and_test(&tg->ref)) 159
158 return; 160 tg = container_of(head, struct throtl_grp, rcu_head);
161 free_percpu(tg->blkg.stats_cpu);
159 kfree(tg); 162 kfree(tg);
160} 163}
161 164
162static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td, 165static void throtl_put_tg(struct throtl_grp *tg)
163 struct blkio_cgroup *blkcg)
164{ 166{
165 struct throtl_grp *tg = NULL; 167 BUG_ON(atomic_read(&tg->ref) <= 0);
166 void *key = td; 168 if (!atomic_dec_and_test(&tg->ref))
167 struct backing_dev_info *bdi = &td->queue->backing_dev_info; 169 return;
168 unsigned int major, minor;
169 170
170 /* 171 /*
171 * TODO: Speed up blkiocg_lookup_group() by maintaining a radix 172 * A group is freed in rcu manner. But having an rcu lock does not
172 * tree of blkg (instead of traversing through hash list all 173 * mean that one can access all the fields of blkg and assume these
173 * the time. 174 * are valid. For example, don't try to follow throtl_data and
175 * request queue links.
176 *
177 * Having a reference to blkg under an rcu allows acess to only
178 * values local to groups like group stats and group rate limits
174 */ 179 */
180 call_rcu(&tg->rcu_head, throtl_free_tg);
181}
175 182
176 /* 183static void throtl_init_group(struct throtl_grp *tg)
177 * This is the common case when there are no blkio cgroups. 184{
178 * Avoid lookup in this case
179 */
180 if (blkcg == &blkio_root_cgroup)
181 tg = &td->root_tg;
182 else
183 tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
184
185 /* Fill in device details for root group */
186 if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
187 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
188 tg->blkg.dev = MKDEV(major, minor);
189 goto done;
190 }
191
192 if (tg)
193 goto done;
194
195 tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
196 if (!tg)
197 goto done;
198
199 INIT_HLIST_NODE(&tg->tg_node); 185 INIT_HLIST_NODE(&tg->tg_node);
200 RB_CLEAR_NODE(&tg->rb_node); 186 RB_CLEAR_NODE(&tg->rb_node);
201 bio_list_init(&tg->bio_lists[0]); 187 bio_list_init(&tg->bio_lists[0]);
202 bio_list_init(&tg->bio_lists[1]); 188 bio_list_init(&tg->bio_lists[1]);
203 td->limits_changed = false; 189 tg->limits_changed = false;
190
191 /* Practically unlimited BW */
192 tg->bps[0] = tg->bps[1] = -1;
193 tg->iops[0] = tg->iops[1] = -1;
204 194
205 /* 195 /*
206 * Take the initial reference that will be released on destroy 196 * Take the initial reference that will be released on destroy
@@ -209,33 +199,181 @@ static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
209 * exit or cgroup deletion path depending on who is exiting first. 199 * exit or cgroup deletion path depending on who is exiting first.
210 */ 200 */
211 atomic_set(&tg->ref, 1); 201 atomic_set(&tg->ref, 1);
202}
203
204/* Should be called with rcu read lock held (needed for blkcg) */
205static void
206throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
207{
208 hlist_add_head(&tg->tg_node, &td->tg_list);
209 td->nr_undestroyed_grps++;
210}
211
212static void
213__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
214{
215 struct backing_dev_info *bdi = &td->queue->backing_dev_info;
216 unsigned int major, minor;
217
218 if (!tg || tg->blkg.dev)
219 return;
220
221 /*
222 * Fill in device details for a group which might not have been
223 * filled at group creation time as queue was being instantiated
224 * and driver had not attached a device yet
225 */
226 if (bdi->dev && dev_name(bdi->dev)) {
227 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
228 tg->blkg.dev = MKDEV(major, minor);
229 }
230}
231
232/*
233 * Should be called with without queue lock held. Here queue lock will be
234 * taken rarely. It will be taken only once during life time of a group
235 * if need be
236 */
237static void
238throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
239{
240 if (!tg || tg->blkg.dev)
241 return;
242
243 spin_lock_irq(td->queue->queue_lock);
244 __throtl_tg_fill_dev_details(td, tg);
245 spin_unlock_irq(td->queue->queue_lock);
246}
247
248static void throtl_init_add_tg_lists(struct throtl_data *td,
249 struct throtl_grp *tg, struct blkio_cgroup *blkcg)
250{
251 __throtl_tg_fill_dev_details(td, tg);
212 252
213 /* Add group onto cgroup list */ 253 /* Add group onto cgroup list */
214 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
215 blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td, 254 blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
216 MKDEV(major, minor), BLKIO_POLICY_THROTL); 255 tg->blkg.dev, BLKIO_POLICY_THROTL);
217 256
218 tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev); 257 tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
219 tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev); 258 tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
220 tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev); 259 tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
221 tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev); 260 tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);
222 261
223 hlist_add_head(&tg->tg_node, &td->tg_list); 262 throtl_add_group_to_td_list(td, tg);
224 td->nr_undestroyed_grps++; 263}
225done: 264
265/* Should be called without queue lock and outside of rcu period */
266static struct throtl_grp *throtl_alloc_tg(struct throtl_data *td)
267{
268 struct throtl_grp *tg = NULL;
269 int ret;
270
271 tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
272 if (!tg)
273 return NULL;
274
275 ret = blkio_alloc_blkg_stats(&tg->blkg);
276
277 if (ret) {
278 kfree(tg);
279 return NULL;
280 }
281
282 throtl_init_group(tg);
226 return tg; 283 return tg;
227} 284}
228 285
229static struct throtl_grp * throtl_get_tg(struct throtl_data *td) 286static struct
287throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
230{ 288{
231 struct throtl_grp *tg = NULL; 289 struct throtl_grp *tg = NULL;
290 void *key = td;
291
292 /*
293 * This is the common case when there are no blkio cgroups.
294 * Avoid lookup in this case
295 */
296 if (blkcg == &blkio_root_cgroup)
297 tg = td->root_tg;
298 else
299 tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
300
301 __throtl_tg_fill_dev_details(td, tg);
302 return tg;
303}
304
305/*
306 * This function returns with queue lock unlocked in case of error, like
307 * request queue is no more
308 */
309static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
310{
311 struct throtl_grp *tg = NULL, *__tg = NULL;
232 struct blkio_cgroup *blkcg; 312 struct blkio_cgroup *blkcg;
313 struct request_queue *q = td->queue;
233 314
234 rcu_read_lock(); 315 rcu_read_lock();
235 blkcg = task_blkio_cgroup(current); 316 blkcg = task_blkio_cgroup(current);
236 tg = throtl_find_alloc_tg(td, blkcg); 317 tg = throtl_find_tg(td, blkcg);
237 if (!tg) 318 if (tg) {
238 tg = &td->root_tg; 319 rcu_read_unlock();
320 return tg;
321 }
322
323 /*
324 * Need to allocate a group. Allocation of group also needs allocation
325 * of per cpu stats which in-turn takes a mutex() and can block. Hence
326 * we need to drop rcu lock and queue_lock before we call alloc
327 *
328 * Take the request queue reference to make sure queue does not
329 * go away once we return from allocation.
330 */
331 blk_get_queue(q);
332 rcu_read_unlock();
333 spin_unlock_irq(q->queue_lock);
334
335 tg = throtl_alloc_tg(td);
336 /*
337 * We might have slept in group allocation. Make sure queue is not
338 * dead
339 */
340 if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
341 blk_put_queue(q);
342 if (tg)
343 kfree(tg);
344
345 return ERR_PTR(-ENODEV);
346 }
347 blk_put_queue(q);
348
349 /* Group allocated and queue is still alive. take the lock */
350 spin_lock_irq(q->queue_lock);
351
352 /*
353 * Initialize the new group. After sleeping, read the blkcg again.
354 */
355 rcu_read_lock();
356 blkcg = task_blkio_cgroup(current);
357
358 /*
359 * If some other thread already allocated the group while we were
360 * not holding queue lock, free up the group
361 */
362 __tg = throtl_find_tg(td, blkcg);
363
364 if (__tg) {
365 kfree(tg);
366 rcu_read_unlock();
367 return __tg;
368 }
369
370 /* Group allocation failed. Account the IO to root group */
371 if (!tg) {
372 tg = td->root_tg;
373 return tg;
374 }
375
376 throtl_init_add_tg_lists(td, tg, blkcg);
239 rcu_read_unlock(); 377 rcu_read_unlock();
240 return tg; 378 return tg;
241} 379}
@@ -544,6 +682,12 @@ static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
544 return 0; 682 return 0;
545} 683}
546 684
685static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
686 if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
687 return 1;
688 return 0;
689}
690
547/* 691/*
548 * Returns whether one can dispatch a bio or not. Also returns approx number 692 * Returns whether one can dispatch a bio or not. Also returns approx number
549 * of jiffies to wait before this bio is with-in IO rate and can be dispatched 693 * of jiffies to wait before this bio is with-in IO rate and can be dispatched
@@ -608,10 +752,6 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
608 tg->bytes_disp[rw] += bio->bi_size; 752 tg->bytes_disp[rw] += bio->bi_size;
609 tg->io_disp[rw]++; 753 tg->io_disp[rw]++;
610 754
611 /*
612 * TODO: This will take blkg->stats_lock. Figure out a way
613 * to avoid this cost.
614 */
615 blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync); 755 blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
616} 756}
617 757
@@ -989,15 +1129,51 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
989 struct throtl_grp *tg; 1129 struct throtl_grp *tg;
990 struct bio *bio = *biop; 1130 struct bio *bio = *biop;
991 bool rw = bio_data_dir(bio), update_disptime = true; 1131 bool rw = bio_data_dir(bio), update_disptime = true;
1132 struct blkio_cgroup *blkcg;
992 1133
993 if (bio->bi_rw & REQ_THROTTLED) { 1134 if (bio->bi_rw & REQ_THROTTLED) {
994 bio->bi_rw &= ~REQ_THROTTLED; 1135 bio->bi_rw &= ~REQ_THROTTLED;
995 return 0; 1136 return 0;
996 } 1137 }
997 1138
1139 /*
1140 * A throtl_grp pointer retrieved under rcu can be used to access
1141 * basic fields like stats and io rates. If a group has no rules,
1142 * just update the dispatch stats in lockless manner and return.
1143 */
1144
1145 rcu_read_lock();
1146 blkcg = task_blkio_cgroup(current);
1147 tg = throtl_find_tg(td, blkcg);
1148 if (tg) {
1149 throtl_tg_fill_dev_details(td, tg);
1150
1151 if (tg_no_rule_group(tg, rw)) {
1152 blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
1153 rw, bio->bi_rw & REQ_SYNC);
1154 rcu_read_unlock();
1155 return 0;
1156 }
1157 }
1158 rcu_read_unlock();
1159
1160 /*
1161 * Either group has not been allocated yet or it is not an unlimited
1162 * IO group
1163 */
1164
998 spin_lock_irq(q->queue_lock); 1165 spin_lock_irq(q->queue_lock);
999 tg = throtl_get_tg(td); 1166 tg = throtl_get_tg(td);
1000 1167
1168 if (IS_ERR(tg)) {
1169 if (PTR_ERR(tg) == -ENODEV) {
1170 /*
1171 * Queue is gone. No queue lock held here.
1172 */
1173 return -ENODEV;
1174 }
1175 }
1176
1001 if (tg->nr_queued[rw]) { 1177 if (tg->nr_queued[rw]) {
1002 /* 1178 /*
1003 * There is already another bio queued in same dir. No 1179 * There is already another bio queued in same dir. No
@@ -1060,39 +1236,24 @@ int blk_throtl_init(struct request_queue *q)
1060 INIT_HLIST_HEAD(&td->tg_list); 1236 INIT_HLIST_HEAD(&td->tg_list);
1061 td->tg_service_tree = THROTL_RB_ROOT; 1237 td->tg_service_tree = THROTL_RB_ROOT;
1062 td->limits_changed = false; 1238 td->limits_changed = false;
1239 INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
1063 1240
1064 /* Init root group */ 1241 /* alloc and Init root group. */
1065 tg = &td->root_tg; 1242 td->queue = q;
1066 INIT_HLIST_NODE(&tg->tg_node); 1243 tg = throtl_alloc_tg(td);
1067 RB_CLEAR_NODE(&tg->rb_node);
1068 bio_list_init(&tg->bio_lists[0]);
1069 bio_list_init(&tg->bio_lists[1]);
1070
1071 /* Practically unlimited BW */
1072 tg->bps[0] = tg->bps[1] = -1;
1073 tg->iops[0] = tg->iops[1] = -1;
1074 td->limits_changed = false;
1075 1244
1076 /* 1245 if (!tg) {
1077 * Set root group reference to 2. One reference will be dropped when 1246 kfree(td);
1078 * all groups on tg_list are being deleted during queue exit. Other 1247 return -ENOMEM;
1079 * reference will remain there as we don't want to delete this group 1248 }
1080 * as it is statically allocated and gets destroyed when throtl_data
1081 * goes away.
1082 */
1083 atomic_set(&tg->ref, 2);
1084 hlist_add_head(&tg->tg_node, &td->tg_list);
1085 td->nr_undestroyed_grps++;
1086 1249
1087 INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work); 1250 td->root_tg = tg;
1088 1251
1089 rcu_read_lock(); 1252 rcu_read_lock();
1090 blkiocg_add_blkio_group(&blkio_root_cgroup, &tg->blkg, (void *)td, 1253 throtl_init_add_tg_lists(td, tg, &blkio_root_cgroup);
1091 0, BLKIO_POLICY_THROTL);
1092 rcu_read_unlock(); 1254 rcu_read_unlock();
1093 1255
1094 /* Attach throtl data to request queue */ 1256 /* Attach throtl data to request queue */
1095 td->queue = q;
1096 q->td = td; 1257 q->td = td;
1097 return 0; 1258 return 0;
1098} 1259}
diff --git a/block/blk.h b/block/blk.h
index 61263463e38e..d6586287adc9 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -62,7 +62,28 @@ static inline struct request *__elv_next_request(struct request_queue *q)
62 return rq; 62 return rq;
63 } 63 }
64 64
65 if (!q->elevator->ops->elevator_dispatch_fn(q, 0)) 65 /*
66 * Flush request is running and flush request isn't queueable
67 * in the drive, we can hold the queue till flush request is
68 * finished. Even we don't do this, driver can't dispatch next
69 * requests and will requeue them. And this can improve
70 * throughput too. For example, we have request flush1, write1,
71 * flush 2. flush1 is dispatched, then queue is hold, write1
72 * isn't inserted to queue. After flush1 is finished, flush2
73 * will be dispatched. Since disk cache is already clean,
74 * flush2 will be finished very soon, so looks like flush2 is
75 * folded to flush1.
76 * Since the queue is hold, a flag is set to indicate the queue
77 * should be restarted later. Please see flush_end_io() for
78 * details.
79 */
80 if (q->flush_pending_idx != q->flush_running_idx &&
81 !queue_flush_queueable(q)) {
82 q->flush_queue_delayed = 1;
83 return NULL;
84 }
85 if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags) ||
86 !q->elevator->ops->elevator_dispatch_fn(q, 0))
66 return NULL; 87 return NULL;
67 } 88 }
68} 89}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ab7a9e6a9b1c..7c52d6888924 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -300,7 +300,9 @@ struct cfq_data {
300 300
301 /* List of cfq groups being managed on this device*/ 301 /* List of cfq groups being managed on this device*/
302 struct hlist_head cfqg_list; 302 struct hlist_head cfqg_list;
303 struct rcu_head rcu; 303
304 /* Number of groups which are on blkcg->blkg_list */
305 unsigned int nr_blkcg_linked_grps;
304}; 306};
305 307
306static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd); 308static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
@@ -665,15 +667,11 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
665 if (rq2 == NULL) 667 if (rq2 == NULL)
666 return rq1; 668 return rq1;
667 669
668 if (rq_is_sync(rq1) && !rq_is_sync(rq2)) 670 if (rq_is_sync(rq1) != rq_is_sync(rq2))
669 return rq1; 671 return rq_is_sync(rq1) ? rq1 : rq2;
670 else if (rq_is_sync(rq2) && !rq_is_sync(rq1)) 672
671 return rq2; 673 if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_META)
672 if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META)) 674 return rq1->cmd_flags & REQ_META ? rq1 : rq2;
673 return rq1;
674 else if ((rq2->cmd_flags & REQ_META) &&
675 !(rq1->cmd_flags & REQ_META))
676 return rq2;
677 675
678 s1 = blk_rq_pos(rq1); 676 s1 = blk_rq_pos(rq1);
679 s2 = blk_rq_pos(rq2); 677 s2 = blk_rq_pos(rq2);
@@ -1014,28 +1012,47 @@ void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
1014 cfqg->needs_update = true; 1012 cfqg->needs_update = true;
1015} 1013}
1016 1014
1017static struct cfq_group * cfq_find_alloc_cfqg(struct cfq_data *cfqd, 1015static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
1018 struct blkio_cgroup *blkcg, int create) 1016 struct cfq_group *cfqg, struct blkio_cgroup *blkcg)
1019{ 1017{
1020 struct cfq_group *cfqg = NULL;
1021 void *key = cfqd;
1022 int i, j;
1023 struct cfq_rb_root *st;
1024 struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info; 1018 struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
1025 unsigned int major, minor; 1019 unsigned int major, minor;
1026 1020
1027 cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key)); 1021 /*
1028 if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) { 1022 * Add group onto cgroup list. It might happen that bdi->dev is
1023 * not initialized yet. Initialize this new group without major
1024 * and minor info and this info will be filled in once a new thread
1025 * comes for IO.
1026 */
1027 if (bdi->dev) {
1029 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor); 1028 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
1030 cfqg->blkg.dev = MKDEV(major, minor); 1029 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
1031 goto done; 1030 (void *)cfqd, MKDEV(major, minor));
1032 } 1031 } else
1033 if (cfqg || !create) 1032 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
1034 goto done; 1033 (void *)cfqd, 0);
1034
1035 cfqd->nr_blkcg_linked_grps++;
1036 cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
1037
1038 /* Add group on cfqd list */
1039 hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
1040}
1041
1042/*
1043 * Should be called from sleepable context. No request queue lock as per
1044 * cpu stats are allocated dynamically and alloc_percpu needs to be called
1045 * from sleepable context.
1046 */
1047static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
1048{
1049 struct cfq_group *cfqg = NULL;
1050 int i, j, ret;
1051 struct cfq_rb_root *st;
1035 1052
1036 cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node); 1053 cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
1037 if (!cfqg) 1054 if (!cfqg)
1038 goto done; 1055 return NULL;
1039 1056
1040 for_each_cfqg_st(cfqg, i, j, st) 1057 for_each_cfqg_st(cfqg, i, j, st)
1041 *st = CFQ_RB_ROOT; 1058 *st = CFQ_RB_ROOT;
@@ -1049,43 +1066,94 @@ static struct cfq_group * cfq_find_alloc_cfqg(struct cfq_data *cfqd,
1049 */ 1066 */
1050 cfqg->ref = 1; 1067 cfqg->ref = 1;
1051 1068
1069 ret = blkio_alloc_blkg_stats(&cfqg->blkg);
1070 if (ret) {
1071 kfree(cfqg);
1072 return NULL;
1073 }
1074
1075 return cfqg;
1076}
1077
1078static struct cfq_group *
1079cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
1080{
1081 struct cfq_group *cfqg = NULL;
1082 void *key = cfqd;
1083 struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
1084 unsigned int major, minor;
1085
1052 /* 1086 /*
1053 * Add group onto cgroup list. It might happen that bdi->dev is 1087 * This is the common case when there are no blkio cgroups.
1054 * not initialized yet. Initialize this new group without major 1088 * Avoid lookup in this case
1055 * and minor info and this info will be filled in once a new thread
1056 * comes for IO. See code above.
1057 */ 1089 */
1058 if (bdi->dev) { 1090 if (blkcg == &blkio_root_cgroup)
1059 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor); 1091 cfqg = &cfqd->root_group;
1060 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd, 1092 else
1061 MKDEV(major, minor)); 1093 cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
1062 } else
1063 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
1064 0);
1065
1066 cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
1067 1094
1068 /* Add group on cfqd list */ 1095 if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
1069 hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list); 1096 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
1097 cfqg->blkg.dev = MKDEV(major, minor);
1098 }
1070 1099
1071done:
1072 return cfqg; 1100 return cfqg;
1073} 1101}
1074 1102
1075/* 1103/*
1076 * Search for the cfq group current task belongs to. If create = 1, then also 1104 * Search for the cfq group current task belongs to. request_queue lock must
1077 * create the cfq group if it does not exist. request_queue lock must be held. 1105 * be held.
1078 */ 1106 */
1079static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create) 1107static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
1080{ 1108{
1081 struct blkio_cgroup *blkcg; 1109 struct blkio_cgroup *blkcg;
1082 struct cfq_group *cfqg = NULL; 1110 struct cfq_group *cfqg = NULL, *__cfqg = NULL;
1111 struct request_queue *q = cfqd->queue;
1083 1112
1084 rcu_read_lock(); 1113 rcu_read_lock();
1085 blkcg = task_blkio_cgroup(current); 1114 blkcg = task_blkio_cgroup(current);
1086 cfqg = cfq_find_alloc_cfqg(cfqd, blkcg, create); 1115 cfqg = cfq_find_cfqg(cfqd, blkcg);
1087 if (!cfqg && create) 1116 if (cfqg) {
1117 rcu_read_unlock();
1118 return cfqg;
1119 }
1120
1121 /*
1122 * Need to allocate a group. Allocation of group also needs allocation
1123 * of per cpu stats which in-turn takes a mutex() and can block. Hence
1124 * we need to drop rcu lock and queue_lock before we call alloc.
1125 *
1126 * Not taking any queue reference here and assuming that queue is
1127 * around by the time we return. CFQ queue allocation code does
1128 * the same. It might be racy though.
1129 */
1130
1131 rcu_read_unlock();
1132 spin_unlock_irq(q->queue_lock);
1133
1134 cfqg = cfq_alloc_cfqg(cfqd);
1135
1136 spin_lock_irq(q->queue_lock);
1137
1138 rcu_read_lock();
1139 blkcg = task_blkio_cgroup(current);
1140
1141 /*
1142 * If some other thread already allocated the group while we were
1143 * not holding queue lock, free up the group
1144 */
1145 __cfqg = cfq_find_cfqg(cfqd, blkcg);
1146
1147 if (__cfqg) {
1148 kfree(cfqg);
1149 rcu_read_unlock();
1150 return __cfqg;
1151 }
1152
1153 if (!cfqg)
1088 cfqg = &cfqd->root_group; 1154 cfqg = &cfqd->root_group;
1155
1156 cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg);
1089 rcu_read_unlock(); 1157 rcu_read_unlock();
1090 return cfqg; 1158 return cfqg;
1091} 1159}
@@ -1118,6 +1186,7 @@ static void cfq_put_cfqg(struct cfq_group *cfqg)
1118 return; 1186 return;
1119 for_each_cfqg_st(cfqg, i, j, st) 1187 for_each_cfqg_st(cfqg, i, j, st)
1120 BUG_ON(!RB_EMPTY_ROOT(&st->rb)); 1188 BUG_ON(!RB_EMPTY_ROOT(&st->rb));
1189 free_percpu(cfqg->blkg.stats_cpu);
1121 kfree(cfqg); 1190 kfree(cfqg);
1122} 1191}
1123 1192
@@ -1176,7 +1245,7 @@ void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
1176} 1245}
1177 1246
1178#else /* GROUP_IOSCHED */ 1247#else /* GROUP_IOSCHED */
1179static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create) 1248static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
1180{ 1249{
1181 return &cfqd->root_group; 1250 return &cfqd->root_group;
1182} 1251}
@@ -1210,7 +1279,6 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1210 struct cfq_rb_root *service_tree; 1279 struct cfq_rb_root *service_tree;
1211 int left; 1280 int left;
1212 int new_cfqq = 1; 1281 int new_cfqq = 1;
1213 int group_changed = 0;
1214 1282
1215 service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq), 1283 service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
1216 cfqq_type(cfqq)); 1284 cfqq_type(cfqq));
@@ -1281,7 +1349,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1281 rb_link_node(&cfqq->rb_node, parent, p); 1349 rb_link_node(&cfqq->rb_node, parent, p);
1282 rb_insert_color(&cfqq->rb_node, &service_tree->rb); 1350 rb_insert_color(&cfqq->rb_node, &service_tree->rb);
1283 service_tree->count++; 1351 service_tree->count++;
1284 if ((add_front || !new_cfqq) && !group_changed) 1352 if (add_front || !new_cfqq)
1285 return; 1353 return;
1286 cfq_group_notify_queue_add(cfqd, cfqq->cfqg); 1354 cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
1287} 1355}
@@ -2029,7 +2097,7 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2029 2097
2030 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); 2098 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2031 2099
2032 return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio)); 2100 return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
2033} 2101}
2034 2102
2035/* 2103/*
@@ -2911,7 +2979,7 @@ cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
2911 struct cfq_group *cfqg; 2979 struct cfq_group *cfqg;
2912 2980
2913retry: 2981retry:
2914 cfqg = cfq_get_cfqg(cfqd, 1); 2982 cfqg = cfq_get_cfqg(cfqd);
2915 cic = cfq_cic_lookup(cfqd, ioc); 2983 cic = cfq_cic_lookup(cfqd, ioc);
2916 /* cic always exists here */ 2984 /* cic always exists here */
2917 cfqq = cic_to_cfqq(cic, is_sync); 2985 cfqq = cic_to_cfqq(cic, is_sync);
@@ -3815,15 +3883,11 @@ static void cfq_put_async_queues(struct cfq_data *cfqd)
3815 cfq_put_queue(cfqd->async_idle_cfqq); 3883 cfq_put_queue(cfqd->async_idle_cfqq);
3816} 3884}
3817 3885
3818static void cfq_cfqd_free(struct rcu_head *head)
3819{
3820 kfree(container_of(head, struct cfq_data, rcu));
3821}
3822
3823static void cfq_exit_queue(struct elevator_queue *e) 3886static void cfq_exit_queue(struct elevator_queue *e)
3824{ 3887{
3825 struct cfq_data *cfqd = e->elevator_data; 3888 struct cfq_data *cfqd = e->elevator_data;
3826 struct request_queue *q = cfqd->queue; 3889 struct request_queue *q = cfqd->queue;
3890 bool wait = false;
3827 3891
3828 cfq_shutdown_timer_wq(cfqd); 3892 cfq_shutdown_timer_wq(cfqd);
3829 3893
@@ -3842,7 +3906,13 @@ static void cfq_exit_queue(struct elevator_queue *e)
3842 3906
3843 cfq_put_async_queues(cfqd); 3907 cfq_put_async_queues(cfqd);
3844 cfq_release_cfq_groups(cfqd); 3908 cfq_release_cfq_groups(cfqd);
3845 cfq_blkiocg_del_blkio_group(&cfqd->root_group.blkg); 3909
3910 /*
3911 * If there are groups which we could not unlink from blkcg list,
3912 * wait for a rcu period for them to be freed.
3913 */
3914 if (cfqd->nr_blkcg_linked_grps)
3915 wait = true;
3846 3916
3847 spin_unlock_irq(q->queue_lock); 3917 spin_unlock_irq(q->queue_lock);
3848 3918
@@ -3852,8 +3922,25 @@ static void cfq_exit_queue(struct elevator_queue *e)
3852 ida_remove(&cic_index_ida, cfqd->cic_index); 3922 ida_remove(&cic_index_ida, cfqd->cic_index);
3853 spin_unlock(&cic_index_lock); 3923 spin_unlock(&cic_index_lock);
3854 3924
3855 /* Wait for cfqg->blkg->key accessors to exit their grace periods. */ 3925 /*
3856 call_rcu(&cfqd->rcu, cfq_cfqd_free); 3926 * Wait for cfqg->blkg->key accessors to exit their grace periods.
3927 * Do this wait only if there are other unlinked groups out
3928 * there. This can happen if cgroup deletion path claimed the
3929 * responsibility of cleaning up a group before queue cleanup code
3930 * get to the group.
3931 *
3932 * Do not call synchronize_rcu() unconditionally as there are drivers
3933 * which create/delete request queue hundreds of times during scan/boot
3934 * and synchronize_rcu() can take significant time and slow down boot.
3935 */
3936 if (wait)
3937 synchronize_rcu();
3938
3939#ifdef CONFIG_CFQ_GROUP_IOSCHED
3940 /* Free up per cpu stats for root group */
3941 free_percpu(cfqd->root_group.blkg.stats_cpu);
3942#endif
3943 kfree(cfqd);
3857} 3944}
3858 3945
3859static int cfq_alloc_cic_index(void) 3946static int cfq_alloc_cic_index(void)
@@ -3886,8 +3973,12 @@ static void *cfq_init_queue(struct request_queue *q)
3886 return NULL; 3973 return NULL;
3887 3974
3888 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node); 3975 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
3889 if (!cfqd) 3976 if (!cfqd) {
3977 spin_lock(&cic_index_lock);
3978 ida_remove(&cic_index_ida, i);
3979 spin_unlock(&cic_index_lock);
3890 return NULL; 3980 return NULL;
3981 }
3891 3982
3892 /* 3983 /*
3893 * Don't need take queue_lock in the routine, since we are 3984 * Don't need take queue_lock in the routine, since we are
@@ -3909,14 +4000,29 @@ static void *cfq_init_queue(struct request_queue *q)
3909 4000
3910#ifdef CONFIG_CFQ_GROUP_IOSCHED 4001#ifdef CONFIG_CFQ_GROUP_IOSCHED
3911 /* 4002 /*
3912 * Take a reference to root group which we never drop. This is just 4003 * Set root group reference to 2. One reference will be dropped when
3913 * to make sure that cfq_put_cfqg() does not try to kfree root group 4004 * all groups on cfqd->cfqg_list are being deleted during queue exit.
4005 * Other reference will remain there as we don't want to delete this
4006 * group as it is statically allocated and gets destroyed when
4007 * throtl_data goes away.
3914 */ 4008 */
3915 cfqg->ref = 1; 4009 cfqg->ref = 2;
4010
4011 if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
4012 kfree(cfqg);
4013 kfree(cfqd);
4014 return NULL;
4015 }
4016
3916 rcu_read_lock(); 4017 rcu_read_lock();
4018
3917 cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, 4019 cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
3918 (void *)cfqd, 0); 4020 (void *)cfqd, 0);
3919 rcu_read_unlock(); 4021 rcu_read_unlock();
4022 cfqd->nr_blkcg_linked_grps++;
4023
4024 /* Add group on cfqd->cfqg_list */
4025 hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
3920#endif 4026#endif
3921 /* 4027 /*
3922 * Not strictly needed (since RB_ROOT just clears the node and we 4028 * Not strictly needed (since RB_ROOT just clears the node and we
diff --git a/block/elevator.c b/block/elevator.c
index 45ca1e34f582..b0b38ce0dcb6 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -155,13 +155,8 @@ static struct elevator_type *elevator_get(const char *name)
155 155
156 e = elevator_find(name); 156 e = elevator_find(name);
157 if (!e) { 157 if (!e) {
158 char elv[ELV_NAME_MAX + strlen("-iosched")];
159
160 spin_unlock(&elv_list_lock); 158 spin_unlock(&elv_list_lock);
161 159 request_module("%s-iosched", name);
162 snprintf(elv, sizeof(elv), "%s-iosched", name);
163
164 request_module("%s", elv);
165 spin_lock(&elv_list_lock); 160 spin_lock(&elv_list_lock);
166 e = elevator_find(name); 161 e = elevator_find(name);
167 } 162 }
@@ -421,8 +416,6 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
421 struct list_head *entry; 416 struct list_head *entry;
422 int stop_flags; 417 int stop_flags;
423 418
424 BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
425
426 if (q->last_merge == rq) 419 if (q->last_merge == rq)
427 q->last_merge = NULL; 420 q->last_merge = NULL;
428 421
@@ -661,8 +654,6 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
661 654
662 rq->q = q; 655 rq->q = q;
663 656
664 BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
665
666 if (rq->cmd_flags & REQ_SOFTBARRIER) { 657 if (rq->cmd_flags & REQ_SOFTBARRIER) {
667 /* barriers are scheduling boundary, update end_sector */ 658 /* barriers are scheduling boundary, update end_sector */
668 if (rq->cmd_type == REQ_TYPE_FS || 659 if (rq->cmd_type == REQ_TYPE_FS ||
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 61631edfecc2..3bb154d8c8cc 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -54,6 +54,8 @@ source "drivers/spi/Kconfig"
54 54
55source "drivers/pps/Kconfig" 55source "drivers/pps/Kconfig"
56 56
57source "drivers/ptp/Kconfig"
58
57source "drivers/gpio/Kconfig" 59source "drivers/gpio/Kconfig"
58 60
59source "drivers/w1/Kconfig" 61source "drivers/w1/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 145aeadb6c03..6b17f5864340 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -75,6 +75,7 @@ obj-$(CONFIG_I2O) += message/
75obj-$(CONFIG_RTC_LIB) += rtc/ 75obj-$(CONFIG_RTC_LIB) += rtc/
76obj-y += i2c/ media/ 76obj-y += i2c/ media/
77obj-$(CONFIG_PPS) += pps/ 77obj-$(CONFIG_PPS) += pps/
78obj-$(CONFIG_PTP_1588_CLOCK) += ptp/
78obj-$(CONFIG_W1) += w1/ 79obj-$(CONFIG_W1) += w1/
79obj-$(CONFIG_POWER_SUPPLY) += power/ 80obj-$(CONFIG_POWER_SUPPLY) += power/
80obj-$(CONFIG_HWMON) += hwmon/ 81obj-$(CONFIG_HWMON) += hwmon/
@@ -94,7 +95,7 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle/
94obj-$(CONFIG_DMA_ENGINE) += dma/ 95obj-$(CONFIG_DMA_ENGINE) += dma/
95obj-$(CONFIG_MMC) += mmc/ 96obj-$(CONFIG_MMC) += mmc/
96obj-$(CONFIG_MEMSTICK) += memstick/ 97obj-$(CONFIG_MEMSTICK) += memstick/
97obj-$(CONFIG_NEW_LEDS) += leds/ 98obj-y += leds/
98obj-$(CONFIG_INFINIBAND) += infiniband/ 99obj-$(CONFIG_INFINIBAND) += infiniband/
99obj-$(CONFIG_SGI_SN) += sn/ 100obj-$(CONFIG_SGI_SN) += sn/
100obj-y += firmware/ 101obj-y += firmware/
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 096aebfe7f32..f74b2ea11f21 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -101,6 +101,14 @@ static DEFINE_MUTEX(einj_mutex);
101 101
102static struct einj_parameter *einj_param; 102static struct einj_parameter *einj_param;
103 103
104#ifndef writeq
105static inline void writeq(__u64 val, volatile void __iomem *addr)
106{
107 writel(val, addr);
108 writel(val >> 32, addr+4);
109}
110#endif
111
104static void einj_exec_ctx_init(struct apei_exec_context *ctx) 112static void einj_exec_ctx_init(struct apei_exec_context *ctx)
105{ 113{
106 apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type), 114 apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type),
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
index 542e53903891..7489b89c300f 100644
--- a/drivers/acpi/atomicio.c
+++ b/drivers/acpi/atomicio.c
@@ -280,9 +280,11 @@ static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
280 case 32: 280 case 32:
281 *val = readl(addr); 281 *val = readl(addr);
282 break; 282 break;
283#ifdef readq
283 case 64: 284 case 64:
284 *val = readq(addr); 285 *val = readq(addr);
285 break; 286 break;
287#endif
286 default: 288 default:
287 return -EINVAL; 289 return -EINVAL;
288 } 290 }
@@ -307,9 +309,11 @@ static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
307 case 32: 309 case 32:
308 writel(val, addr); 310 writel(val, addr);
309 break; 311 break;
312#ifdef writeq
310 case 64: 313 case 64:
311 writeq(val, addr); 314 writeq(val, addr);
312 break; 315 break;
316#endif
313 default: 317 default:
314 return -EINVAL; 318 return -EINVAL;
315 } 319 }
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 30ea95f43e79..d51f9795c064 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1089,21 +1089,21 @@ static int atapi_drain_needed(struct request *rq)
1089static int ata_scsi_dev_config(struct scsi_device *sdev, 1089static int ata_scsi_dev_config(struct scsi_device *sdev,
1090 struct ata_device *dev) 1090 struct ata_device *dev)
1091{ 1091{
1092 struct request_queue *q = sdev->request_queue;
1093
1092 if (!ata_id_has_unload(dev->id)) 1094 if (!ata_id_has_unload(dev->id))
1093 dev->flags |= ATA_DFLAG_NO_UNLOAD; 1095 dev->flags |= ATA_DFLAG_NO_UNLOAD;
1094 1096
1095 /* configure max sectors */ 1097 /* configure max sectors */
1096 blk_queue_max_hw_sectors(sdev->request_queue, dev->max_sectors); 1098 blk_queue_max_hw_sectors(q, dev->max_sectors);
1097 1099
1098 if (dev->class == ATA_DEV_ATAPI) { 1100 if (dev->class == ATA_DEV_ATAPI) {
1099 struct request_queue *q = sdev->request_queue;
1100 void *buf; 1101 void *buf;
1101 1102
1102 sdev->sector_size = ATA_SECT_SIZE; 1103 sdev->sector_size = ATA_SECT_SIZE;
1103 1104
1104 /* set DMA padding */ 1105 /* set DMA padding */
1105 blk_queue_update_dma_pad(sdev->request_queue, 1106 blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);
1106 ATA_DMA_PAD_SZ - 1);
1107 1107
1108 /* configure draining */ 1108 /* configure draining */
1109 buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL); 1109 buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
@@ -1131,8 +1131,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
1131 "sector_size=%u > PAGE_SIZE, PIO may malfunction\n", 1131 "sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
1132 sdev->sector_size); 1132 sdev->sector_size);
1133 1133
1134 blk_queue_update_dma_alignment(sdev->request_queue, 1134 blk_queue_update_dma_alignment(q, sdev->sector_size - 1);
1135 sdev->sector_size - 1);
1136 1135
1137 if (dev->flags & ATA_DFLAG_AN) 1136 if (dev->flags & ATA_DFLAG_AN)
1138 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events); 1137 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
@@ -1145,6 +1144,8 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
1145 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth); 1144 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
1146 } 1145 }
1147 1146
1147 blk_queue_flush_queueable(q, false);
1148
1148 dev->sdev = sdev; 1149 dev->sdev = sdev;
1149 return 0; 1150 return 0;
1150} 1151}
diff --git a/drivers/base/node.c b/drivers/base/node.c
index b3b72d64e805..793f796c4da3 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -7,6 +7,7 @@
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/mm.h> 8#include <linux/mm.h>
9#include <linux/memory.h> 9#include <linux/memory.h>
10#include <linux/vmstat.h>
10#include <linux/node.h> 11#include <linux/node.h>
11#include <linux/hugetlb.h> 12#include <linux/hugetlb.h>
12#include <linux/compaction.h> 13#include <linux/compaction.h>
@@ -179,11 +180,14 @@ static ssize_t node_read_vmstat(struct sys_device *dev,
179 struct sysdev_attribute *attr, char *buf) 180 struct sysdev_attribute *attr, char *buf)
180{ 181{
181 int nid = dev->id; 182 int nid = dev->id;
182 return sprintf(buf, 183 int i;
183 "nr_written %lu\n" 184 int n = 0;
184 "nr_dirtied %lu\n", 185
185 node_page_state(nid, NR_WRITTEN), 186 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
186 node_page_state(nid, NR_DIRTIED)); 187 n += sprintf(buf+n, "%s %lu\n", vmstat_text[i],
188 node_page_state(nid, i));
189
190 return n;
187} 191}
188static SYSDEV_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL); 192static SYSDEV_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
189 193
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 83c32cb72582..717d6e4e18d3 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -470,6 +470,27 @@ config XEN_BLKDEV_FRONTEND
470 block device driver. It communicates with a back-end driver 470 block device driver. It communicates with a back-end driver
471 in another domain which drives the actual block device. 471 in another domain which drives the actual block device.
472 472
473config XEN_BLKDEV_BACKEND
474 tristate "Block-device backend driver"
475 depends on XEN_BACKEND
476 help
477 The block-device backend driver allows the kernel to export its
478 block devices to other guests via a high-performance shared-memory
479 interface.
480
481 The corresponding Linux frontend driver is enabled by the
482 CONFIG_XEN_BLKDEV_FRONTEND configuration option.
483
484 The backend driver attaches itself to a any block device specified
485 in the XenBus configuration. There are no limits to what the block
486 device as long as it has a major and minor.
487
488 If you are compiling a kernel to run in a Xen block backend driver
489 domain (often this is domain 0) you should say Y here. To
490 compile this driver as a module, chose M here: the module
491 will be called xen-blkback.
492
493
473config VIRTIO_BLK 494config VIRTIO_BLK
474 tristate "Virtio block driver (EXPERIMENTAL)" 495 tristate "Virtio block driver (EXPERIMENTAL)"
475 depends on EXPERIMENTAL && VIRTIO 496 depends on EXPERIMENTAL && VIRTIO
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 40528ba56d1b..76646e9a1c91 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_BLK_DEV_UB) += ub.o
36obj-$(CONFIG_BLK_DEV_HD) += hd.o 36obj-$(CONFIG_BLK_DEV_HD) += hd.o
37 37
38obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o 38obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
39obj-$(CONFIG_XEN_BLKDEV_BACKEND) += xen-blkback/
39obj-$(CONFIG_BLK_DEV_DRBD) += drbd/ 40obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
40obj-$(CONFIG_BLK_DEV_RBD) += rbd.o 41obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
41 42
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 9bf13988f1a2..8f4ef656a1af 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -64,6 +64,10 @@ MODULE_DESCRIPTION("Driver for HP Smart Array Controllers");
64MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers"); 64MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
65MODULE_VERSION("3.6.26"); 65MODULE_VERSION("3.6.26");
66MODULE_LICENSE("GPL"); 66MODULE_LICENSE("GPL");
67static int cciss_tape_cmds = 6;
68module_param(cciss_tape_cmds, int, 0644);
69MODULE_PARM_DESC(cciss_tape_cmds,
70 "number of commands to allocate for tape devices (default: 6)");
67 71
68static DEFINE_MUTEX(cciss_mutex); 72static DEFINE_MUTEX(cciss_mutex);
69static struct proc_dir_entry *proc_cciss; 73static struct proc_dir_entry *proc_cciss;
@@ -194,6 +198,8 @@ static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
194static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev, 198static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
195 unsigned long *memory_bar); 199 unsigned long *memory_bar);
196static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag); 200static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag);
201static __devinit int write_driver_ver_to_cfgtable(
202 CfgTable_struct __iomem *cfgtable);
197 203
198/* performant mode helper functions */ 204/* performant mode helper functions */
199static void calc_bucket_map(int *bucket, int num_buckets, int nsgs, 205static void calc_bucket_map(int *bucket, int num_buckets, int nsgs,
@@ -556,7 +562,7 @@ static void __devinit cciss_procinit(ctlr_info_t *h)
556#define to_hba(n) container_of(n, struct ctlr_info, dev) 562#define to_hba(n) container_of(n, struct ctlr_info, dev)
557#define to_drv(n) container_of(n, drive_info_struct, dev) 563#define to_drv(n) container_of(n, drive_info_struct, dev)
558 564
559/* List of controllers which cannot be reset on kexec with reset_devices */ 565/* List of controllers which cannot be hard reset on kexec with reset_devices */
560static u32 unresettable_controller[] = { 566static u32 unresettable_controller[] = {
561 0x324a103C, /* Smart Array P712m */ 567 0x324a103C, /* Smart Array P712m */
562 0x324b103C, /* SmartArray P711m */ 568 0x324b103C, /* SmartArray P711m */
@@ -574,23 +580,45 @@ static u32 unresettable_controller[] = {
574 0x409D0E11, /* Smart Array 6400 EM */ 580 0x409D0E11, /* Smart Array 6400 EM */
575}; 581};
576 582
577static int ctlr_is_resettable(struct ctlr_info *h) 583/* List of controllers which cannot even be soft reset */
584static u32 soft_unresettable_controller[] = {
585 0x409C0E11, /* Smart Array 6400 */
586 0x409D0E11, /* Smart Array 6400 EM */
587};
588
589static int ctlr_is_hard_resettable(u32 board_id)
578{ 590{
579 int i; 591 int i;
580 592
581 for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++) 593 for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
582 if (unresettable_controller[i] == h->board_id) 594 if (unresettable_controller[i] == board_id)
583 return 0; 595 return 0;
584 return 1; 596 return 1;
585} 597}
586 598
599static int ctlr_is_soft_resettable(u32 board_id)
600{
601 int i;
602
603 for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
604 if (soft_unresettable_controller[i] == board_id)
605 return 0;
606 return 1;
607}
608
609static int ctlr_is_resettable(u32 board_id)
610{
611 return ctlr_is_hard_resettable(board_id) ||
612 ctlr_is_soft_resettable(board_id);
613}
614
587static ssize_t host_show_resettable(struct device *dev, 615static ssize_t host_show_resettable(struct device *dev,
588 struct device_attribute *attr, 616 struct device_attribute *attr,
589 char *buf) 617 char *buf)
590{ 618{
591 struct ctlr_info *h = to_hba(dev); 619 struct ctlr_info *h = to_hba(dev);
592 620
593 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h)); 621 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
594} 622}
595static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL); 623static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL);
596 624
@@ -2567,7 +2595,7 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
2567 } 2595 }
2568 } else if (cmd_type == TYPE_MSG) { 2596 } else if (cmd_type == TYPE_MSG) {
2569 switch (cmd) { 2597 switch (cmd) {
2570 case 0: /* ABORT message */ 2598 case CCISS_ABORT_MSG:
2571 c->Request.CDBLen = 12; 2599 c->Request.CDBLen = 12;
2572 c->Request.Type.Attribute = ATTR_SIMPLE; 2600 c->Request.Type.Attribute = ATTR_SIMPLE;
2573 c->Request.Type.Direction = XFER_WRITE; 2601 c->Request.Type.Direction = XFER_WRITE;
@@ -2577,16 +2605,16 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
2577 /* buff contains the tag of the command to abort */ 2605 /* buff contains the tag of the command to abort */
2578 memcpy(&c->Request.CDB[4], buff, 8); 2606 memcpy(&c->Request.CDB[4], buff, 8);
2579 break; 2607 break;
2580 case 1: /* RESET message */ 2608 case CCISS_RESET_MSG:
2581 c->Request.CDBLen = 16; 2609 c->Request.CDBLen = 16;
2582 c->Request.Type.Attribute = ATTR_SIMPLE; 2610 c->Request.Type.Attribute = ATTR_SIMPLE;
2583 c->Request.Type.Direction = XFER_NONE; 2611 c->Request.Type.Direction = XFER_NONE;
2584 c->Request.Timeout = 0; 2612 c->Request.Timeout = 0;
2585 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); 2613 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
2586 c->Request.CDB[0] = cmd; /* reset */ 2614 c->Request.CDB[0] = cmd; /* reset */
2587 c->Request.CDB[1] = 0x03; /* reset a target */ 2615 c->Request.CDB[1] = CCISS_RESET_TYPE_TARGET;
2588 break; 2616 break;
2589 case 3: /* No-Op message */ 2617 case CCISS_NOOP_MSG:
2590 c->Request.CDBLen = 1; 2618 c->Request.CDBLen = 1;
2591 c->Request.Type.Attribute = ATTR_SIMPLE; 2619 c->Request.Type.Attribute = ATTR_SIMPLE;
2592 c->Request.Type.Direction = XFER_WRITE; 2620 c->Request.Type.Direction = XFER_WRITE;
@@ -2615,6 +2643,31 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
2615 return status; 2643 return status;
2616} 2644}
2617 2645
2646static int __devinit cciss_send_reset(ctlr_info_t *h, unsigned char *scsi3addr,
2647 u8 reset_type)
2648{
2649 CommandList_struct *c;
2650 int return_status;
2651
2652 c = cmd_alloc(h);
2653 if (!c)
2654 return -ENOMEM;
2655 return_status = fill_cmd(h, c, CCISS_RESET_MSG, NULL, 0, 0,
2656 CTLR_LUNID, TYPE_MSG);
2657 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
2658 if (return_status != IO_OK) {
2659 cmd_special_free(h, c);
2660 return return_status;
2661 }
2662 c->waiting = NULL;
2663 enqueue_cmd_and_start_io(h, c);
2664 /* Don't wait for completion, the reset won't complete. Don't free
2665 * the command either. This is the last command we will send before
2666 * re-initializing everything, so it doesn't matter and won't leak.
2667 */
2668 return 0;
2669}
2670
2618static int check_target_status(ctlr_info_t *h, CommandList_struct *c) 2671static int check_target_status(ctlr_info_t *h, CommandList_struct *c)
2619{ 2672{
2620 switch (c->err_info->ScsiStatus) { 2673 switch (c->err_info->ScsiStatus) {
@@ -3461,6 +3514,63 @@ static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag)
3461 return next_command(h); 3514 return next_command(h);
3462} 3515}
3463 3516
3517/* Some controllers, like p400, will give us one interrupt
3518 * after a soft reset, even if we turned interrupts off.
3519 * Only need to check for this in the cciss_xxx_discard_completions
3520 * functions.
3521 */
3522static int ignore_bogus_interrupt(ctlr_info_t *h)
3523{
3524 if (likely(!reset_devices))
3525 return 0;
3526
3527 if (likely(h->interrupts_enabled))
3528 return 0;
3529
3530 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
3531 "(known firmware bug.) Ignoring.\n");
3532
3533 return 1;
3534}
3535
3536static irqreturn_t cciss_intx_discard_completions(int irq, void *dev_id)
3537{
3538 ctlr_info_t *h = dev_id;
3539 unsigned long flags;
3540 u32 raw_tag;
3541
3542 if (ignore_bogus_interrupt(h))
3543 return IRQ_NONE;
3544
3545 if (interrupt_not_for_us(h))
3546 return IRQ_NONE;
3547 spin_lock_irqsave(&h->lock, flags);
3548 while (interrupt_pending(h)) {
3549 raw_tag = get_next_completion(h);
3550 while (raw_tag != FIFO_EMPTY)
3551 raw_tag = next_command(h);
3552 }
3553 spin_unlock_irqrestore(&h->lock, flags);
3554 return IRQ_HANDLED;
3555}
3556
3557static irqreturn_t cciss_msix_discard_completions(int irq, void *dev_id)
3558{
3559 ctlr_info_t *h = dev_id;
3560 unsigned long flags;
3561 u32 raw_tag;
3562
3563 if (ignore_bogus_interrupt(h))
3564 return IRQ_NONE;
3565
3566 spin_lock_irqsave(&h->lock, flags);
3567 raw_tag = get_next_completion(h);
3568 while (raw_tag != FIFO_EMPTY)
3569 raw_tag = next_command(h);
3570 spin_unlock_irqrestore(&h->lock, flags);
3571 return IRQ_HANDLED;
3572}
3573
3464static irqreturn_t do_cciss_intx(int irq, void *dev_id) 3574static irqreturn_t do_cciss_intx(int irq, void *dev_id)
3465{ 3575{
3466 ctlr_info_t *h = dev_id; 3576 ctlr_info_t *h = dev_id;
@@ -4078,6 +4188,9 @@ static int __devinit cciss_find_cfgtables(ctlr_info_t *h)
4078 cfg_base_addr_index) + cfg_offset, sizeof(h->cfgtable)); 4188 cfg_base_addr_index) + cfg_offset, sizeof(h->cfgtable));
4079 if (!h->cfgtable) 4189 if (!h->cfgtable)
4080 return -ENOMEM; 4190 return -ENOMEM;
4191 rc = write_driver_ver_to_cfgtable(h->cfgtable);
4192 if (rc)
4193 return rc;
4081 /* Find performant mode table. */ 4194 /* Find performant mode table. */
4082 trans_offset = readl(&h->cfgtable->TransMethodOffset); 4195 trans_offset = readl(&h->cfgtable->TransMethodOffset);
4083 h->transtable = remap_pci_mem(pci_resource_start(h->pdev, 4196 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
@@ -4112,7 +4225,7 @@ static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
4112static void __devinit cciss_find_board_params(ctlr_info_t *h) 4225static void __devinit cciss_find_board_params(ctlr_info_t *h)
4113{ 4226{
4114 cciss_get_max_perf_mode_cmds(h); 4227 cciss_get_max_perf_mode_cmds(h);
4115 h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */ 4228 h->nr_cmds = h->max_commands - 4 - cciss_tape_cmds;
4116 h->maxsgentries = readl(&(h->cfgtable->MaxSGElements)); 4229 h->maxsgentries = readl(&(h->cfgtable->MaxSGElements));
4117 /* 4230 /*
4118 * Limit in-command s/g elements to 32 save dma'able memory. 4231 * Limit in-command s/g elements to 32 save dma'able memory.
@@ -4348,7 +4461,7 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
4348 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); 4461 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
4349 if ((tag & ~3) == paddr32) 4462 if ((tag & ~3) == paddr32)
4350 break; 4463 break;
4351 schedule_timeout_uninterruptible(HZ); 4464 msleep(CCISS_POST_RESET_NOOP_TIMEOUT_MSECS);
4352 } 4465 }
4353 4466
4354 iounmap(vaddr); 4467 iounmap(vaddr);
@@ -4375,11 +4488,10 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
4375 return 0; 4488 return 0;
4376} 4489}
4377 4490
4378#define cciss_soft_reset_controller(p) cciss_message(p, 1, 0)
4379#define cciss_noop(p) cciss_message(p, 3, 0) 4491#define cciss_noop(p) cciss_message(p, 3, 0)
4380 4492
4381static int cciss_controller_hard_reset(struct pci_dev *pdev, 4493static int cciss_controller_hard_reset(struct pci_dev *pdev,
4382 void * __iomem vaddr, bool use_doorbell) 4494 void * __iomem vaddr, u32 use_doorbell)
4383{ 4495{
4384 u16 pmcsr; 4496 u16 pmcsr;
4385 int pos; 4497 int pos;
@@ -4390,8 +4502,7 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev,
4390 * other way using the doorbell register. 4502 * other way using the doorbell register.
4391 */ 4503 */
4392 dev_info(&pdev->dev, "using doorbell to reset controller\n"); 4504 dev_info(&pdev->dev, "using doorbell to reset controller\n");
4393 writel(DOORBELL_CTLR_RESET, vaddr + SA5_DOORBELL); 4505 writel(use_doorbell, vaddr + SA5_DOORBELL);
4394 msleep(1000);
4395 } else { /* Try to do it the PCI power state way */ 4506 } else { /* Try to do it the PCI power state way */
4396 4507
4397 /* Quoting from the Open CISS Specification: "The Power 4508 /* Quoting from the Open CISS Specification: "The Power
@@ -4422,12 +4533,64 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev,
4422 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 4533 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
4423 pmcsr |= PCI_D0; 4534 pmcsr |= PCI_D0;
4424 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 4535 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
4425
4426 msleep(500);
4427 } 4536 }
4428 return 0; 4537 return 0;
4429} 4538}
4430 4539
4540static __devinit void init_driver_version(char *driver_version, int len)
4541{
4542 memset(driver_version, 0, len);
4543 strncpy(driver_version, "cciss " DRIVER_NAME, len - 1);
4544}
4545
4546static __devinit int write_driver_ver_to_cfgtable(
4547 CfgTable_struct __iomem *cfgtable)
4548{
4549 char *driver_version;
4550 int i, size = sizeof(cfgtable->driver_version);
4551
4552 driver_version = kmalloc(size, GFP_KERNEL);
4553 if (!driver_version)
4554 return -ENOMEM;
4555
4556 init_driver_version(driver_version, size);
4557 for (i = 0; i < size; i++)
4558 writeb(driver_version[i], &cfgtable->driver_version[i]);
4559 kfree(driver_version);
4560 return 0;
4561}
4562
4563static __devinit void read_driver_ver_from_cfgtable(
4564 CfgTable_struct __iomem *cfgtable, unsigned char *driver_ver)
4565{
4566 int i;
4567
4568 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
4569 driver_ver[i] = readb(&cfgtable->driver_version[i]);
4570}
4571
4572static __devinit int controller_reset_failed(
4573 CfgTable_struct __iomem *cfgtable)
4574{
4575
4576 char *driver_ver, *old_driver_ver;
4577 int rc, size = sizeof(cfgtable->driver_version);
4578
4579 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
4580 if (!old_driver_ver)
4581 return -ENOMEM;
4582 driver_ver = old_driver_ver + size;
4583
4584 /* After a reset, the 32 bytes of "driver version" in the cfgtable
4585 * should have been changed, otherwise we know the reset failed.
4586 */
4587 init_driver_version(old_driver_ver, size);
4588 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
4589 rc = !memcmp(driver_ver, old_driver_ver, size);
4590 kfree(old_driver_ver);
4591 return rc;
4592}
4593
4431/* This does a hard reset of the controller using PCI power management 4594/* This does a hard reset of the controller using PCI power management
4432 * states or using the doorbell register. */ 4595 * states or using the doorbell register. */
4433static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) 4596static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
@@ -4437,10 +4600,10 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4437 u64 cfg_base_addr_index; 4600 u64 cfg_base_addr_index;
4438 void __iomem *vaddr; 4601 void __iomem *vaddr;
4439 unsigned long paddr; 4602 unsigned long paddr;
4440 u32 misc_fw_support, active_transport; 4603 u32 misc_fw_support;
4441 int rc; 4604 int rc;
4442 CfgTable_struct __iomem *cfgtable; 4605 CfgTable_struct __iomem *cfgtable;
4443 bool use_doorbell; 4606 u32 use_doorbell;
4444 u32 board_id; 4607 u32 board_id;
4445 u16 command_register; 4608 u16 command_register;
4446 4609
@@ -4464,12 +4627,16 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4464 * likely not be happy. Just forbid resetting this conjoined mess. 4627 * likely not be happy. Just forbid resetting this conjoined mess.
4465 */ 4628 */
4466 cciss_lookup_board_id(pdev, &board_id); 4629 cciss_lookup_board_id(pdev, &board_id);
4467 if (board_id == 0x409C0E11 || board_id == 0x409D0E11) { 4630 if (!ctlr_is_resettable(board_id)) {
4468 dev_warn(&pdev->dev, "Cannot reset Smart Array 640x " 4631 dev_warn(&pdev->dev, "Cannot reset Smart Array 640x "
4469 "due to shared cache module."); 4632 "due to shared cache module.");
4470 return -ENODEV; 4633 return -ENODEV;
4471 } 4634 }
4472 4635
4636 /* if controller is soft- but not hard resettable... */
4637 if (!ctlr_is_hard_resettable(board_id))
4638 return -ENOTSUPP; /* try soft reset later. */
4639
4473 /* Save the PCI command register */ 4640 /* Save the PCI command register */
4474 pci_read_config_word(pdev, 4, &command_register); 4641 pci_read_config_word(pdev, 4, &command_register);
4475 /* Turn the board off. This is so that later pci_restore_state() 4642 /* Turn the board off. This is so that later pci_restore_state()
@@ -4497,16 +4664,28 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4497 rc = -ENOMEM; 4664 rc = -ENOMEM;
4498 goto unmap_vaddr; 4665 goto unmap_vaddr;
4499 } 4666 }
4667 rc = write_driver_ver_to_cfgtable(cfgtable);
4668 if (rc)
4669 goto unmap_vaddr;
4500 4670
4501 /* If reset via doorbell register is supported, use that. */ 4671 /* If reset via doorbell register is supported, use that.
4502 misc_fw_support = readl(&cfgtable->misc_fw_support); 4672 * There are two such methods. Favor the newest method.
4503 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
4504
4505 /* The doorbell reset seems to cause lockups on some Smart
4506 * Arrays (e.g. P410, P410i, maybe others). Until this is
4507 * fixed or at least isolated, avoid the doorbell reset.
4508 */ 4673 */
4509 use_doorbell = 0; 4674 misc_fw_support = readl(&cfgtable->misc_fw_support);
4675 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
4676 if (use_doorbell) {
4677 use_doorbell = DOORBELL_CTLR_RESET2;
4678 } else {
4679 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
4680 if (use_doorbell) {
4681 dev_warn(&pdev->dev, "Controller claims that "
4682 "'Bit 2 doorbell reset' is "
4683 "supported, but not 'bit 5 doorbell reset'. "
4684 "Firmware update is recommended.\n");
4685 rc = -ENOTSUPP; /* use the soft reset */
4686 goto unmap_cfgtable;
4687 }
4688 }
4510 4689
4511 rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); 4690 rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
4512 if (rc) 4691 if (rc)
@@ -4524,30 +4703,31 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4524 msleep(CCISS_POST_RESET_PAUSE_MSECS); 4703 msleep(CCISS_POST_RESET_PAUSE_MSECS);
4525 4704
4526 /* Wait for board to become not ready, then ready. */ 4705 /* Wait for board to become not ready, then ready. */
4527 dev_info(&pdev->dev, "Waiting for board to become ready.\n"); 4706 dev_info(&pdev->dev, "Waiting for board to reset.\n");
4528 rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); 4707 rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
4529 if (rc) /* Don't bail, might be E500, etc. which can't be reset */ 4708 if (rc) {
4530 dev_warn(&pdev->dev, 4709 dev_warn(&pdev->dev, "Failed waiting for board to hard reset."
4531 "failed waiting for board to become not ready\n"); 4710 " Will try soft reset.\n");
4711 rc = -ENOTSUPP; /* Not expected, but try soft reset later */
4712 goto unmap_cfgtable;
4713 }
4532 rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY); 4714 rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY);
4533 if (rc) { 4715 if (rc) {
4534 dev_warn(&pdev->dev, 4716 dev_warn(&pdev->dev,
4535 "failed waiting for board to become ready\n"); 4717 "failed waiting for board to become ready "
4718 "after hard reset\n");
4536 goto unmap_cfgtable; 4719 goto unmap_cfgtable;
4537 } 4720 }
4538 dev_info(&pdev->dev, "board ready.\n");
4539 4721
4540 /* Controller should be in simple mode at this point. If it's not, 4722 rc = controller_reset_failed(vaddr);
4541 * It means we're on one of those controllers which doesn't support 4723 if (rc < 0)
4542 * the doorbell reset method and on which the PCI power management reset 4724 goto unmap_cfgtable;
4543 * method doesn't work (P800, for example.) 4725 if (rc) {
4544 * In those cases, don't try to proceed, as it generally doesn't work. 4726 dev_warn(&pdev->dev, "Unable to successfully hard reset "
4545 */ 4727 "controller. Will try soft reset.\n");
4546 active_transport = readl(&cfgtable->TransportActive); 4728 rc = -ENOTSUPP; /* Not expected, but try soft reset later */
4547 if (active_transport & PERFORMANT_MODE) { 4729 } else {
4548 dev_warn(&pdev->dev, "Unable to successfully reset controller," 4730 dev_info(&pdev->dev, "Board ready after hard reset.\n");
4549 " Ignoring controller.\n");
4550 rc = -ENODEV;
4551 } 4731 }
4552 4732
4553unmap_cfgtable: 4733unmap_cfgtable:
@@ -4574,11 +4754,12 @@ static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
4574 * due to concerns about shared bbwc between 6402/6404 pair. 4754 * due to concerns about shared bbwc between 6402/6404 pair.
4575 */ 4755 */
4576 if (rc == -ENOTSUPP) 4756 if (rc == -ENOTSUPP)
4577 return 0; /* just try to do the kdump anyhow. */ 4757 return rc; /* just try to do the kdump anyhow. */
4578 if (rc) 4758 if (rc)
4579 return -ENODEV; 4759 return -ENODEV;
4580 4760
4581 /* Now try to get the controller to respond to a no-op */ 4761 /* Now try to get the controller to respond to a no-op */
4762 dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
4582 for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) { 4763 for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) {
4583 if (cciss_noop(pdev) == 0) 4764 if (cciss_noop(pdev) == 0)
4584 break; 4765 break;
@@ -4591,6 +4772,148 @@ static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
4591 return 0; 4772 return 0;
4592} 4773}
4593 4774
4775static __devinit int cciss_allocate_cmd_pool(ctlr_info_t *h)
4776{
4777 h->cmd_pool_bits = kmalloc(
4778 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
4779 sizeof(unsigned long), GFP_KERNEL);
4780 h->cmd_pool = pci_alloc_consistent(h->pdev,
4781 h->nr_cmds * sizeof(CommandList_struct),
4782 &(h->cmd_pool_dhandle));
4783 h->errinfo_pool = pci_alloc_consistent(h->pdev,
4784 h->nr_cmds * sizeof(ErrorInfo_struct),
4785 &(h->errinfo_pool_dhandle));
4786 if ((h->cmd_pool_bits == NULL)
4787 || (h->cmd_pool == NULL)
4788 || (h->errinfo_pool == NULL)) {
4789 dev_err(&h->pdev->dev, "out of memory");
4790 return -ENOMEM;
4791 }
4792 return 0;
4793}
4794
4795static __devinit int cciss_allocate_scatterlists(ctlr_info_t *h)
4796{
4797 int i;
4798
4799 /* zero it, so that on free we need not know how many were alloc'ed */
4800 h->scatter_list = kzalloc(h->max_commands *
4801 sizeof(struct scatterlist *), GFP_KERNEL);
4802 if (!h->scatter_list)
4803 return -ENOMEM;
4804
4805 for (i = 0; i < h->nr_cmds; i++) {
4806 h->scatter_list[i] = kmalloc(sizeof(struct scatterlist) *
4807 h->maxsgentries, GFP_KERNEL);
4808 if (h->scatter_list[i] == NULL) {
4809 dev_err(&h->pdev->dev, "could not allocate "
4810 "s/g lists\n");
4811 return -ENOMEM;
4812 }
4813 }
4814 return 0;
4815}
4816
4817static void cciss_free_scatterlists(ctlr_info_t *h)
4818{
4819 int i;
4820
4821 if (h->scatter_list) {
4822 for (i = 0; i < h->nr_cmds; i++)
4823 kfree(h->scatter_list[i]);
4824 kfree(h->scatter_list);
4825 }
4826}
4827
4828static void cciss_free_cmd_pool(ctlr_info_t *h)
4829{
4830 kfree(h->cmd_pool_bits);
4831 if (h->cmd_pool)
4832 pci_free_consistent(h->pdev,
4833 h->nr_cmds * sizeof(CommandList_struct),
4834 h->cmd_pool, h->cmd_pool_dhandle);
4835 if (h->errinfo_pool)
4836 pci_free_consistent(h->pdev,
4837 h->nr_cmds * sizeof(ErrorInfo_struct),
4838 h->errinfo_pool, h->errinfo_pool_dhandle);
4839}
4840
4841static int cciss_request_irq(ctlr_info_t *h,
4842 irqreturn_t (*msixhandler)(int, void *),
4843 irqreturn_t (*intxhandler)(int, void *))
4844{
4845 if (h->msix_vector || h->msi_vector) {
4846 if (!request_irq(h->intr[PERF_MODE_INT], msixhandler,
4847 IRQF_DISABLED, h->devname, h))
4848 return 0;
4849 dev_err(&h->pdev->dev, "Unable to get msi irq %d"
4850 " for %s\n", h->intr[PERF_MODE_INT],
4851 h->devname);
4852 return -1;
4853 }
4854
4855 if (!request_irq(h->intr[PERF_MODE_INT], intxhandler,
4856 IRQF_DISABLED, h->devname, h))
4857 return 0;
4858 dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
4859 h->intr[PERF_MODE_INT], h->devname);
4860 return -1;
4861}
4862
4863static int __devinit cciss_kdump_soft_reset(ctlr_info_t *h)
4864{
4865 if (cciss_send_reset(h, CTLR_LUNID, CCISS_RESET_TYPE_CONTROLLER)) {
4866 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
4867 return -EIO;
4868 }
4869
4870 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
4871 if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
4872 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
4873 return -1;
4874 }
4875
4876 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
4877 if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
4878 dev_warn(&h->pdev->dev, "Board failed to become ready "
4879 "after soft reset.\n");
4880 return -1;
4881 }
4882
4883 return 0;
4884}
4885
4886static void cciss_undo_allocations_after_kdump_soft_reset(ctlr_info_t *h)
4887{
4888 int ctlr = h->ctlr;
4889
4890 free_irq(h->intr[PERF_MODE_INT], h);
4891#ifdef CONFIG_PCI_MSI
4892 if (h->msix_vector)
4893 pci_disable_msix(h->pdev);
4894 else if (h->msi_vector)
4895 pci_disable_msi(h->pdev);
4896#endif /* CONFIG_PCI_MSI */
4897 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
4898 cciss_free_scatterlists(h);
4899 cciss_free_cmd_pool(h);
4900 kfree(h->blockFetchTable);
4901 if (h->reply_pool)
4902 pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64),
4903 h->reply_pool, h->reply_pool_dhandle);
4904 if (h->transtable)
4905 iounmap(h->transtable);
4906 if (h->cfgtable)
4907 iounmap(h->cfgtable);
4908 if (h->vaddr)
4909 iounmap(h->vaddr);
4910 unregister_blkdev(h->major, h->devname);
4911 cciss_destroy_hba_sysfs_entry(h);
4912 pci_release_regions(h->pdev);
4913 kfree(h);
4914 hba[ctlr] = NULL;
4915}
4916
4594/* 4917/*
4595 * This is it. Find all the controllers and register them. I really hate 4918 * This is it. Find all the controllers and register them. I really hate
4596 * stealing all these major device numbers. 4919 * stealing all these major device numbers.
@@ -4601,15 +4924,28 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4601{ 4924{
4602 int i; 4925 int i;
4603 int j = 0; 4926 int j = 0;
4604 int k = 0;
4605 int rc; 4927 int rc;
4928 int try_soft_reset = 0;
4606 int dac, return_code; 4929 int dac, return_code;
4607 InquiryData_struct *inq_buff; 4930 InquiryData_struct *inq_buff;
4608 ctlr_info_t *h; 4931 ctlr_info_t *h;
4932 unsigned long flags;
4609 4933
4610 rc = cciss_init_reset_devices(pdev); 4934 rc = cciss_init_reset_devices(pdev);
4611 if (rc) 4935 if (rc) {
4612 return rc; 4936 if (rc != -ENOTSUPP)
4937 return rc;
4938 /* If the reset fails in a particular way (it has no way to do
4939 * a proper hard reset, so returns -ENOTSUPP) we can try to do
4940 * a soft reset once we get the controller configured up to the
4941 * point that it can accept a command.
4942 */
4943 try_soft_reset = 1;
4944 rc = 0;
4945 }
4946
4947reinit_after_soft_reset:
4948
4613 i = alloc_cciss_hba(pdev); 4949 i = alloc_cciss_hba(pdev);
4614 if (i < 0) 4950 if (i < 0)
4615 return -1; 4951 return -1;
@@ -4627,6 +4963,11 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4627 sprintf(h->devname, "cciss%d", i); 4963 sprintf(h->devname, "cciss%d", i);
4628 h->ctlr = i; 4964 h->ctlr = i;
4629 4965
4966 if (cciss_tape_cmds < 2)
4967 cciss_tape_cmds = 2;
4968 if (cciss_tape_cmds > 16)
4969 cciss_tape_cmds = 16;
4970
4630 init_completion(&h->scan_wait); 4971 init_completion(&h->scan_wait);
4631 4972
4632 if (cciss_create_hba_sysfs_entry(h)) 4973 if (cciss_create_hba_sysfs_entry(h))
@@ -4662,62 +5003,20 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4662 5003
4663 /* make sure the board interrupts are off */ 5004 /* make sure the board interrupts are off */
4664 h->access.set_intr_mask(h, CCISS_INTR_OFF); 5005 h->access.set_intr_mask(h, CCISS_INTR_OFF);
4665 if (h->msi_vector || h->msix_vector) { 5006 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
4666 if (request_irq(h->intr[PERF_MODE_INT], 5007 if (rc)
4667 do_cciss_msix_intr, 5008 goto clean2;
4668 IRQF_DISABLED, h->devname, h)) {
4669 dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
4670 h->intr[PERF_MODE_INT], h->devname);
4671 goto clean2;
4672 }
4673 } else {
4674 if (request_irq(h->intr[PERF_MODE_INT], do_cciss_intx,
4675 IRQF_DISABLED, h->devname, h)) {
4676 dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
4677 h->intr[PERF_MODE_INT], h->devname);
4678 goto clean2;
4679 }
4680 }
4681 5009
4682 dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n", 5010 dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
4683 h->devname, pdev->device, pci_name(pdev), 5011 h->devname, pdev->device, pci_name(pdev),
4684 h->intr[PERF_MODE_INT], dac ? "" : " not"); 5012 h->intr[PERF_MODE_INT], dac ? "" : " not");
4685 5013
4686 h->cmd_pool_bits = 5014 if (cciss_allocate_cmd_pool(h))
4687 kmalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG)
4688 * sizeof(unsigned long), GFP_KERNEL);
4689 h->cmd_pool = (CommandList_struct *)
4690 pci_alloc_consistent(h->pdev,
4691 h->nr_cmds * sizeof(CommandList_struct),
4692 &(h->cmd_pool_dhandle));
4693 h->errinfo_pool = (ErrorInfo_struct *)
4694 pci_alloc_consistent(h->pdev,
4695 h->nr_cmds * sizeof(ErrorInfo_struct),
4696 &(h->errinfo_pool_dhandle));
4697 if ((h->cmd_pool_bits == NULL)
4698 || (h->cmd_pool == NULL)
4699 || (h->errinfo_pool == NULL)) {
4700 dev_err(&h->pdev->dev, "out of memory");
4701 goto clean4; 5015 goto clean4;
4702 }
4703 5016
4704 /* Need space for temp scatter list */ 5017 if (cciss_allocate_scatterlists(h))
4705 h->scatter_list = kmalloc(h->max_commands *
4706 sizeof(struct scatterlist *),
4707 GFP_KERNEL);
4708 if (!h->scatter_list)
4709 goto clean4; 5018 goto clean4;
4710 5019
4711 for (k = 0; k < h->nr_cmds; k++) {
4712 h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) *
4713 h->maxsgentries,
4714 GFP_KERNEL);
4715 if (h->scatter_list[k] == NULL) {
4716 dev_err(&h->pdev->dev,
4717 "could not allocate s/g lists\n");
4718 goto clean4;
4719 }
4720 }
4721 h->cmd_sg_list = cciss_allocate_sg_chain_blocks(h, 5020 h->cmd_sg_list = cciss_allocate_sg_chain_blocks(h,
4722 h->chainsize, h->nr_cmds); 5021 h->chainsize, h->nr_cmds);
4723 if (!h->cmd_sg_list && h->chainsize > 0) 5022 if (!h->cmd_sg_list && h->chainsize > 0)
@@ -4741,6 +5040,62 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4741 h->gendisk[j] = NULL; 5040 h->gendisk[j] = NULL;
4742 } 5041 }
4743 5042
5043 /* At this point, the controller is ready to take commands.
5044 * Now, if reset_devices and the hard reset didn't work, try
5045 * the soft reset and see if that works.
5046 */
5047 if (try_soft_reset) {
5048
5049 /* This is kind of gross. We may or may not get a completion
5050 * from the soft reset command, and if we do, then the value
5051 * from the fifo may or may not be valid. So, we wait 10 secs
5052 * after the reset throwing away any completions we get during
5053 * that time. Unregister the interrupt handler and register
5054 * fake ones to scoop up any residual completions.
5055 */
5056 spin_lock_irqsave(&h->lock, flags);
5057 h->access.set_intr_mask(h, CCISS_INTR_OFF);
5058 spin_unlock_irqrestore(&h->lock, flags);
5059 free_irq(h->intr[PERF_MODE_INT], h);
5060 rc = cciss_request_irq(h, cciss_msix_discard_completions,
5061 cciss_intx_discard_completions);
5062 if (rc) {
5063 dev_warn(&h->pdev->dev, "Failed to request_irq after "
5064 "soft reset.\n");
5065 goto clean4;
5066 }
5067
5068 rc = cciss_kdump_soft_reset(h);
5069 if (rc) {
5070 dev_warn(&h->pdev->dev, "Soft reset failed.\n");
5071 goto clean4;
5072 }
5073
5074 dev_info(&h->pdev->dev, "Board READY.\n");
5075 dev_info(&h->pdev->dev,
5076 "Waiting for stale completions to drain.\n");
5077 h->access.set_intr_mask(h, CCISS_INTR_ON);
5078 msleep(10000);
5079 h->access.set_intr_mask(h, CCISS_INTR_OFF);
5080
5081 rc = controller_reset_failed(h->cfgtable);
5082 if (rc)
5083 dev_info(&h->pdev->dev,
5084 "Soft reset appears to have failed.\n");
5085
5086 /* since the controller's reset, we have to go back and re-init
5087 * everything. Easiest to just forget what we've done and do it
5088 * all over again.
5089 */
5090 cciss_undo_allocations_after_kdump_soft_reset(h);
5091 try_soft_reset = 0;
5092 if (rc)
5093 /* don't go to clean4, we already unallocated */
5094 return -ENODEV;
5095
5096 goto reinit_after_soft_reset;
5097 }
5098
4744 cciss_scsi_setup(h); 5099 cciss_scsi_setup(h);
4745 5100
4746 /* Turn the interrupts on so we can service requests */ 5101 /* Turn the interrupts on so we can service requests */
@@ -4775,21 +5130,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4775 return 1; 5130 return 1;
4776 5131
4777clean4: 5132clean4:
4778 kfree(h->cmd_pool_bits); 5133 cciss_free_cmd_pool(h);
4779 /* Free up sg elements */ 5134 cciss_free_scatterlists(h);
4780 for (k-- ; k >= 0; k--)
4781 kfree(h->scatter_list[k]);
4782 kfree(h->scatter_list);
4783 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); 5135 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
4784 if (h->cmd_pool)
4785 pci_free_consistent(h->pdev,
4786 h->nr_cmds * sizeof(CommandList_struct),
4787 h->cmd_pool, h->cmd_pool_dhandle);
4788 if (h->errinfo_pool)
4789 pci_free_consistent(h->pdev,
4790 h->nr_cmds * sizeof(ErrorInfo_struct),
4791 h->errinfo_pool,
4792 h->errinfo_pool_dhandle);
4793 free_irq(h->intr[PERF_MODE_INT], h); 5136 free_irq(h->intr[PERF_MODE_INT], h);
4794clean2: 5137clean2:
4795 unregister_blkdev(h->major, h->devname); 5138 unregister_blkdev(h->major, h->devname);
@@ -4887,16 +5230,16 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
4887 iounmap(h->cfgtable); 5230 iounmap(h->cfgtable);
4888 iounmap(h->vaddr); 5231 iounmap(h->vaddr);
4889 5232
4890 pci_free_consistent(h->pdev, h->nr_cmds * sizeof(CommandList_struct), 5233 cciss_free_cmd_pool(h);
4891 h->cmd_pool, h->cmd_pool_dhandle);
4892 pci_free_consistent(h->pdev, h->nr_cmds * sizeof(ErrorInfo_struct),
4893 h->errinfo_pool, h->errinfo_pool_dhandle);
4894 kfree(h->cmd_pool_bits);
4895 /* Free up sg elements */ 5234 /* Free up sg elements */
4896 for (j = 0; j < h->nr_cmds; j++) 5235 for (j = 0; j < h->nr_cmds; j++)
4897 kfree(h->scatter_list[j]); 5236 kfree(h->scatter_list[j]);
4898 kfree(h->scatter_list); 5237 kfree(h->scatter_list);
4899 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); 5238 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
5239 kfree(h->blockFetchTable);
5240 if (h->reply_pool)
5241 pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64),
5242 h->reply_pool, h->reply_pool_dhandle);
4900 /* 5243 /*
4901 * Deliberately omit pci_disable_device(): it does something nasty to 5244 * Deliberately omit pci_disable_device(): it does something nasty to
4902 * Smart Array controllers that pci_enable_device does not undo 5245 * Smart Array controllers that pci_enable_device does not undo
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 554bbd907d14..16b4d58d84dd 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -200,7 +200,7 @@ struct ctlr_info
200 * the above. 200 * the above.
201 */ 201 */
202#define CCISS_BOARD_READY_WAIT_SECS (120) 202#define CCISS_BOARD_READY_WAIT_SECS (120)
203#define CCISS_BOARD_NOT_READY_WAIT_SECS (10) 203#define CCISS_BOARD_NOT_READY_WAIT_SECS (100)
204#define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100) 204#define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100)
205#define CCISS_BOARD_READY_ITERATIONS \ 205#define CCISS_BOARD_READY_ITERATIONS \
206 ((CCISS_BOARD_READY_WAIT_SECS * 1000) / \ 206 ((CCISS_BOARD_READY_WAIT_SECS * 1000) / \
@@ -209,8 +209,9 @@ struct ctlr_info
209 ((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \ 209 ((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \
210 CCISS_BOARD_READY_POLL_INTERVAL_MSECS) 210 CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
211#define CCISS_POST_RESET_PAUSE_MSECS (3000) 211#define CCISS_POST_RESET_PAUSE_MSECS (3000)
212#define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000) 212#define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (4000)
213#define CCISS_POST_RESET_NOOP_RETRIES (12) 213#define CCISS_POST_RESET_NOOP_RETRIES (12)
214#define CCISS_POST_RESET_NOOP_TIMEOUT_MSECS (10000)
214 215
215/* 216/*
216 Send the command to the hardware 217 Send the command to the hardware
@@ -239,11 +240,13 @@ static void SA5_intr_mask(ctlr_info_t *h, unsigned long val)
239 { /* Turn interrupts on */ 240 { /* Turn interrupts on */
240 h->interrupts_enabled = 1; 241 h->interrupts_enabled = 1;
241 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 242 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
243 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
242 } else /* Turn them off */ 244 } else /* Turn them off */
243 { 245 {
244 h->interrupts_enabled = 0; 246 h->interrupts_enabled = 0;
245 writel( SA5_INTR_OFF, 247 writel( SA5_INTR_OFF,
246 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 248 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
249 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
247 } 250 }
248} 251}
249/* 252/*
@@ -257,11 +260,13 @@ static void SA5B_intr_mask(ctlr_info_t *h, unsigned long val)
257 { /* Turn interrupts on */ 260 { /* Turn interrupts on */
258 h->interrupts_enabled = 1; 261 h->interrupts_enabled = 1;
259 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 262 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
263 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
260 } else /* Turn them off */ 264 } else /* Turn them off */
261 { 265 {
262 h->interrupts_enabled = 0; 266 h->interrupts_enabled = 0;
263 writel( SA5B_INTR_OFF, 267 writel( SA5B_INTR_OFF,
264 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 268 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
269 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
265 } 270 }
266} 271}
267 272
@@ -271,10 +276,12 @@ static void SA5_performant_intr_mask(ctlr_info_t *h, unsigned long val)
271 if (val) { /* turn on interrupts */ 276 if (val) { /* turn on interrupts */
272 h->interrupts_enabled = 1; 277 h->interrupts_enabled = 1;
273 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 278 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
279 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
274 } else { 280 } else {
275 h->interrupts_enabled = 0; 281 h->interrupts_enabled = 0;
276 writel(SA5_PERF_INTR_OFF, 282 writel(SA5_PERF_INTR_OFF,
277 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 283 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
284 (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
278 } 285 }
279} 286}
280 287
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index cd441bef031f..d9be6b4d49a6 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -53,6 +53,7 @@
53#define CFGTBL_ChangeReq 0x00000001l 53#define CFGTBL_ChangeReq 0x00000001l
54#define CFGTBL_AccCmds 0x00000001l 54#define CFGTBL_AccCmds 0x00000001l
55#define DOORBELL_CTLR_RESET 0x00000004l 55#define DOORBELL_CTLR_RESET 0x00000004l
56#define DOORBELL_CTLR_RESET2 0x00000020l
56 57
57#define CFGTBL_Trans_Simple 0x00000002l 58#define CFGTBL_Trans_Simple 0x00000002l
58#define CFGTBL_Trans_Performant 0x00000004l 59#define CFGTBL_Trans_Performant 0x00000004l
@@ -142,6 +143,14 @@ typedef struct _ReadCapdata_struct_16
142#define BMIC_CACHE_FLUSH 0xc2 143#define BMIC_CACHE_FLUSH 0xc2
143#define CCISS_CACHE_FLUSH 0x01 /* C2 was already being used by CCISS */ 144#define CCISS_CACHE_FLUSH 0x01 /* C2 was already being used by CCISS */
144 145
146#define CCISS_ABORT_MSG 0x00
147#define CCISS_RESET_MSG 0x01
148#define CCISS_RESET_TYPE_CONTROLLER 0x00
149#define CCISS_RESET_TYPE_BUS 0x01
150#define CCISS_RESET_TYPE_TARGET 0x03
151#define CCISS_RESET_TYPE_LUN 0x04
152#define CCISS_NOOP_MSG 0x03
153
145/* Command List Structure */ 154/* Command List Structure */
146#define CTLR_LUNID "\0\0\0\0\0\0\0\0" 155#define CTLR_LUNID "\0\0\0\0\0\0\0\0"
147 156
@@ -235,6 +244,8 @@ typedef struct _CfgTable_struct {
235 u8 reserved[0x78 - 0x58]; 244 u8 reserved[0x78 - 0x58];
236 u32 misc_fw_support; /* offset 0x78 */ 245 u32 misc_fw_support; /* offset 0x78 */
237#define MISC_FW_DOORBELL_RESET (0x02) 246#define MISC_FW_DOORBELL_RESET (0x02)
247#define MISC_FW_DOORBELL_RESET2 (0x10)
248 u8 driver_version[32];
238} CfgTable_struct; 249} CfgTable_struct;
239 250
240struct TransTable_struct { 251struct TransTable_struct {
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index df793803f5ae..696100241a6f 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -84,7 +84,6 @@ static struct scsi_host_template cciss_driver_template = {
84 .proc_name = "cciss", 84 .proc_name = "cciss",
85 .proc_info = cciss_scsi_proc_info, 85 .proc_info = cciss_scsi_proc_info,
86 .queuecommand = cciss_scsi_queue_command, 86 .queuecommand = cciss_scsi_queue_command,
87 .can_queue = SCSI_CCISS_CAN_QUEUE,
88 .this_id = 7, 87 .this_id = 7,
89 .cmd_per_lun = 1, 88 .cmd_per_lun = 1,
90 .use_clustering = DISABLE_CLUSTERING, 89 .use_clustering = DISABLE_CLUSTERING,
@@ -108,16 +107,13 @@ struct cciss_scsi_cmd_stack_elem_t {
108 107
109#pragma pack() 108#pragma pack()
110 109
111#define CMD_STACK_SIZE (SCSI_CCISS_CAN_QUEUE * \
112 CCISS_MAX_SCSI_DEVS_PER_HBA + 2)
113 // plus two for init time usage
114
115#pragma pack(1) 110#pragma pack(1)
116struct cciss_scsi_cmd_stack_t { 111struct cciss_scsi_cmd_stack_t {
117 struct cciss_scsi_cmd_stack_elem_t *pool; 112 struct cciss_scsi_cmd_stack_elem_t *pool;
118 struct cciss_scsi_cmd_stack_elem_t *elem[CMD_STACK_SIZE]; 113 struct cciss_scsi_cmd_stack_elem_t **elem;
119 dma_addr_t cmd_pool_handle; 114 dma_addr_t cmd_pool_handle;
120 int top; 115 int top;
116 int nelems;
121}; 117};
122#pragma pack() 118#pragma pack()
123 119
@@ -191,7 +187,7 @@ scsi_cmd_free(ctlr_info_t *h, CommandList_struct *c)
191 sa = h->scsi_ctlr; 187 sa = h->scsi_ctlr;
192 stk = &sa->cmd_stack; 188 stk = &sa->cmd_stack;
193 stk->top++; 189 stk->top++;
194 if (stk->top >= CMD_STACK_SIZE) { 190 if (stk->top >= stk->nelems) {
195 dev_err(&h->pdev->dev, 191 dev_err(&h->pdev->dev,
196 "scsi_cmd_free called too many times.\n"); 192 "scsi_cmd_free called too many times.\n");
197 BUG(); 193 BUG();
@@ -206,13 +202,14 @@ scsi_cmd_stack_setup(ctlr_info_t *h, struct cciss_scsi_adapter_data_t *sa)
206 struct cciss_scsi_cmd_stack_t *stk; 202 struct cciss_scsi_cmd_stack_t *stk;
207 size_t size; 203 size_t size;
208 204
205 stk = &sa->cmd_stack;
206 stk->nelems = cciss_tape_cmds + 2;
209 sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(h, 207 sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(h,
210 h->chainsize, CMD_STACK_SIZE); 208 h->chainsize, stk->nelems);
211 if (!sa->cmd_sg_list && h->chainsize > 0) 209 if (!sa->cmd_sg_list && h->chainsize > 0)
212 return -ENOMEM; 210 return -ENOMEM;
213 211
214 stk = &sa->cmd_stack; 212 size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * stk->nelems;
215 size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE;
216 213
217 /* Check alignment, see cciss_cmd.h near CommandList_struct def. */ 214 /* Check alignment, see cciss_cmd.h near CommandList_struct def. */
218 BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0); 215 BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0);
@@ -221,18 +218,23 @@ scsi_cmd_stack_setup(ctlr_info_t *h, struct cciss_scsi_adapter_data_t *sa)
221 pci_alloc_consistent(h->pdev, size, &stk->cmd_pool_handle); 218 pci_alloc_consistent(h->pdev, size, &stk->cmd_pool_handle);
222 219
223 if (stk->pool == NULL) { 220 if (stk->pool == NULL) {
224 cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE); 221 cciss_free_sg_chain_blocks(sa->cmd_sg_list, stk->nelems);
225 sa->cmd_sg_list = NULL; 222 sa->cmd_sg_list = NULL;
226 return -ENOMEM; 223 return -ENOMEM;
227 } 224 }
228 225 stk->elem = kmalloc(sizeof(stk->elem[0]) * stk->nelems, GFP_KERNEL);
229 for (i=0; i<CMD_STACK_SIZE; i++) { 226 if (!stk->elem) {
227 pci_free_consistent(h->pdev, size, stk->pool,
228 stk->cmd_pool_handle);
229 return -1;
230 }
231 for (i = 0; i < stk->nelems; i++) {
230 stk->elem[i] = &stk->pool[i]; 232 stk->elem[i] = &stk->pool[i];
231 stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle + 233 stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle +
232 (sizeof(struct cciss_scsi_cmd_stack_elem_t) * i)); 234 (sizeof(struct cciss_scsi_cmd_stack_elem_t) * i));
233 stk->elem[i]->cmdindex = i; 235 stk->elem[i]->cmdindex = i;
234 } 236 }
235 stk->top = CMD_STACK_SIZE-1; 237 stk->top = stk->nelems-1;
236 return 0; 238 return 0;
237} 239}
238 240
@@ -245,16 +247,18 @@ scsi_cmd_stack_free(ctlr_info_t *h)
245 247
246 sa = h->scsi_ctlr; 248 sa = h->scsi_ctlr;
247 stk = &sa->cmd_stack; 249 stk = &sa->cmd_stack;
248 if (stk->top != CMD_STACK_SIZE-1) { 250 if (stk->top != stk->nelems-1) {
249 dev_warn(&h->pdev->dev, 251 dev_warn(&h->pdev->dev,
250 "bug: %d scsi commands are still outstanding.\n", 252 "bug: %d scsi commands are still outstanding.\n",
251 CMD_STACK_SIZE - stk->top); 253 stk->nelems - stk->top);
252 } 254 }
253 size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE; 255 size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * stk->nelems;
254 256
255 pci_free_consistent(h->pdev, size, stk->pool, stk->cmd_pool_handle); 257 pci_free_consistent(h->pdev, size, stk->pool, stk->cmd_pool_handle);
256 stk->pool = NULL; 258 stk->pool = NULL;
257 cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE); 259 cciss_free_sg_chain_blocks(sa->cmd_sg_list, stk->nelems);
260 kfree(stk->elem);
261 stk->elem = NULL;
258} 262}
259 263
260#if 0 264#if 0
@@ -859,6 +863,7 @@ cciss_scsi_detect(ctlr_info_t *h)
859 sh->io_port = 0; // good enough? FIXME, 863 sh->io_port = 0; // good enough? FIXME,
860 sh->n_io_port = 0; // I don't think we use these two... 864 sh->n_io_port = 0; // I don't think we use these two...
861 sh->this_id = SELF_SCSI_ID; 865 sh->this_id = SELF_SCSI_ID;
866 sh->can_queue = cciss_tape_cmds;
862 sh->sg_tablesize = h->maxsgentries; 867 sh->sg_tablesize = h->maxsgentries;
863 sh->max_cmd_len = MAX_COMMAND_SIZE; 868 sh->max_cmd_len = MAX_COMMAND_SIZE;
864 869
diff --git a/drivers/block/cciss_scsi.h b/drivers/block/cciss_scsi.h
index 6d5822fe851a..e71d986727ca 100644
--- a/drivers/block/cciss_scsi.h
+++ b/drivers/block/cciss_scsi.h
@@ -36,13 +36,9 @@
36 addressible natively, and may in fact turn 36 addressible natively, and may in fact turn
37 out to be not scsi at all. */ 37 out to be not scsi at all. */
38 38
39#define SCSI_CCISS_CAN_QUEUE 2
40 39
41/* 40/*
42 41
43Note, cmd_per_lun could give us some trouble, so I'm setting it very low.
44Likewise, SCSI_CCISS_CAN_QUEUE is set very conservatively.
45
46If the upper scsi layer tries to track how many commands we have 42If the upper scsi layer tries to track how many commands we have
47outstanding, it will be operating under the misapprehension that it is 43outstanding, it will be operating under the misapprehension that it is
48the only one sending us requests. We also have the block interface, 44the only one sending us requests. We also have the block interface,
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index c6828b68d77b..09ef9a878ef0 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -28,7 +28,7 @@
28#include "drbd_int.h" 28#include "drbd_int.h"
29#include "drbd_wrappers.h" 29#include "drbd_wrappers.h"
30 30
31/* We maintain a trivial check sum in our on disk activity log. 31/* We maintain a trivial checksum in our on disk activity log.
32 * With that we can ensure correct operation even when the storage 32 * With that we can ensure correct operation even when the storage
33 * device might do a partial (last) sector write while losing power. 33 * device might do a partial (last) sector write while losing power.
34 */ 34 */
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 76210ba401ac..f440a02dfdb1 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -74,7 +74,7 @@
74 * as we are "attached" to a local disk, which at 32 GiB for 1PiB storage 74 * as we are "attached" to a local disk, which at 32 GiB for 1PiB storage
75 * seems excessive. 75 * seems excessive.
76 * 76 *
77 * We plan to reduce the amount of in-core bitmap pages by pageing them in 77 * We plan to reduce the amount of in-core bitmap pages by paging them in
78 * and out against their on-disk location as necessary, but need to make 78 * and out against their on-disk location as necessary, but need to make
79 * sure we don't cause too much meta data IO, and must not deadlock in 79 * sure we don't cause too much meta data IO, and must not deadlock in
80 * tight memory situations. This needs some more work. 80 * tight memory situations. This needs some more work.
@@ -200,7 +200,7 @@ void drbd_bm_unlock(struct drbd_conf *mdev)
200 * we if bits have been cleared since last IO. */ 200 * we if bits have been cleared since last IO. */
201#define BM_PAGE_LAZY_WRITEOUT 28 201#define BM_PAGE_LAZY_WRITEOUT 28
202 202
203/* store_page_idx uses non-atomic assingment. It is only used directly after 203/* store_page_idx uses non-atomic assignment. It is only used directly after
204 * allocating the page. All other bm_set_page_* and bm_clear_page_* need to 204 * allocating the page. All other bm_set_page_* and bm_clear_page_* need to
205 * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap 205 * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap
206 * changes) may happen from various contexts, and wait_on_bit/wake_up_bit 206 * changes) may happen from various contexts, and wait_on_bit/wake_up_bit
@@ -318,7 +318,7 @@ static void bm_unmap(unsigned long *p_addr)
318/* word offset from start of bitmap to word number _in_page_ 318/* word offset from start of bitmap to word number _in_page_
319 * modulo longs per page 319 * modulo longs per page
320#define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long)) 320#define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long))
321 hm, well, Philipp thinks gcc might not optimze the % into & (... - 1) 321 hm, well, Philipp thinks gcc might not optimize the % into & (... - 1)
322 so do it explicitly: 322 so do it explicitly:
323 */ 323 */
324#define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1)) 324#define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index d871b14ed5a1..ef2ceed3be4b 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -700,7 +700,7 @@ struct drbd_request {
700 * see drbd_endio_pri(). */ 700 * see drbd_endio_pri(). */
701 struct bio *private_bio; 701 struct bio *private_bio;
702 702
703 struct hlist_node colision; 703 struct hlist_node collision;
704 sector_t sector; 704 sector_t sector;
705 unsigned int size; 705 unsigned int size;
706 unsigned int epoch; /* barrier_nr */ 706 unsigned int epoch; /* barrier_nr */
@@ -766,7 +766,7 @@ struct digest_info {
766 766
767struct drbd_epoch_entry { 767struct drbd_epoch_entry {
768 struct drbd_work w; 768 struct drbd_work w;
769 struct hlist_node colision; 769 struct hlist_node collision;
770 struct drbd_epoch *epoch; /* for writes */ 770 struct drbd_epoch *epoch; /* for writes */
771 struct drbd_conf *mdev; 771 struct drbd_conf *mdev;
772 struct page *pages; 772 struct page *pages;
@@ -1129,6 +1129,8 @@ struct drbd_conf {
1129 int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */ 1129 int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
1130 int rs_planed; /* resync sectors already planned */ 1130 int rs_planed; /* resync sectors already planned */
1131 atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */ 1131 atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
1132 int peer_max_bio_size;
1133 int local_max_bio_size;
1132}; 1134};
1133 1135
1134static inline struct drbd_conf *minor_to_mdev(unsigned int minor) 1136static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -1218,8 +1220,6 @@ extern void drbd_free_resources(struct drbd_conf *mdev);
1218extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr, 1220extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
1219 unsigned int set_size); 1221 unsigned int set_size);
1220extern void tl_clear(struct drbd_conf *mdev); 1222extern void tl_clear(struct drbd_conf *mdev);
1221enum drbd_req_event;
1222extern void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what);
1223extern void _tl_add_barrier(struct drbd_conf *, struct drbd_tl_epoch *); 1223extern void _tl_add_barrier(struct drbd_conf *, struct drbd_tl_epoch *);
1224extern void drbd_free_sock(struct drbd_conf *mdev); 1224extern void drbd_free_sock(struct drbd_conf *mdev);
1225extern int drbd_send(struct drbd_conf *mdev, struct socket *sock, 1225extern int drbd_send(struct drbd_conf *mdev, struct socket *sock,
@@ -1434,6 +1434,7 @@ struct bm_extent {
1434 * hash table. */ 1434 * hash table. */
1435#define HT_SHIFT 8 1435#define HT_SHIFT 8
1436#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT)) 1436#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
1437#define DRBD_MAX_BIO_SIZE_SAFE (1 << 12) /* Works always = 4k */
1437 1438
1438#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */ 1439#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */
1439 1440
@@ -1518,9 +1519,9 @@ extern void drbd_resume_io(struct drbd_conf *mdev);
1518extern char *ppsize(char *buf, unsigned long long size); 1519extern char *ppsize(char *buf, unsigned long long size);
1519extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int); 1520extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
1520enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 }; 1521enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
1521extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local); 1522extern enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
1522extern void resync_after_online_grow(struct drbd_conf *); 1523extern void resync_after_online_grow(struct drbd_conf *);
1523extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local); 1524extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev);
1524extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev, 1525extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev,
1525 enum drbd_role new_role, 1526 enum drbd_role new_role,
1526 int force); 1527 int force);
@@ -1828,6 +1829,8 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
1828 if (!forcedetach) { 1829 if (!forcedetach) {
1829 if (__ratelimit(&drbd_ratelimit_state)) 1830 if (__ratelimit(&drbd_ratelimit_state))
1830 dev_err(DEV, "Local IO failed in %s.\n", where); 1831 dev_err(DEV, "Local IO failed in %s.\n", where);
1832 if (mdev->state.disk > D_INCONSISTENT)
1833 _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_HARD, NULL);
1831 break; 1834 break;
1832 } 1835 }
1833 /* NOTE fall through to detach case if forcedetach set */ 1836 /* NOTE fall through to detach case if forcedetach set */
@@ -2153,6 +2156,10 @@ static inline int get_net_conf(struct drbd_conf *mdev)
2153static inline void put_ldev(struct drbd_conf *mdev) 2156static inline void put_ldev(struct drbd_conf *mdev)
2154{ 2157{
2155 int i = atomic_dec_return(&mdev->local_cnt); 2158 int i = atomic_dec_return(&mdev->local_cnt);
2159
2160 /* This may be called from some endio handler,
2161 * so we must not sleep here. */
2162
2156 __release(local); 2163 __release(local);
2157 D_ASSERT(i >= 0); 2164 D_ASSERT(i >= 0);
2158 if (i == 0) { 2165 if (i == 0) {
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 5b525c179f39..0358e55356c8 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -745,6 +745,9 @@ is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
745 mdev->agreed_pro_version < 88) 745 mdev->agreed_pro_version < 88)
746 rv = SS_NOT_SUPPORTED; 746 rv = SS_NOT_SUPPORTED;
747 747
748 else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
749 rv = SS_CONNECTED_OUTDATES;
750
748 return rv; 751 return rv;
749} 752}
750 753
@@ -1565,6 +1568,10 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1565 put_ldev(mdev); 1568 put_ldev(mdev);
1566 } 1569 }
1567 1570
1571 /* Notify peer that I had a local IO error, and did not detached.. */
1572 if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT)
1573 drbd_send_state(mdev);
1574
1568 /* Disks got bigger while they were detached */ 1575 /* Disks got bigger while they were detached */
1569 if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING && 1576 if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1570 test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) { 1577 test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
@@ -2064,7 +2071,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
2064{ 2071{
2065 struct p_sizes p; 2072 struct p_sizes p;
2066 sector_t d_size, u_size; 2073 sector_t d_size, u_size;
2067 int q_order_type; 2074 int q_order_type, max_bio_size;
2068 int ok; 2075 int ok;
2069 2076
2070 if (get_ldev_if_state(mdev, D_NEGOTIATING)) { 2077 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
@@ -2072,17 +2079,20 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
2072 d_size = drbd_get_max_capacity(mdev->ldev); 2079 d_size = drbd_get_max_capacity(mdev->ldev);
2073 u_size = mdev->ldev->dc.disk_size; 2080 u_size = mdev->ldev->dc.disk_size;
2074 q_order_type = drbd_queue_order_type(mdev); 2081 q_order_type = drbd_queue_order_type(mdev);
2082 max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
2083 max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
2075 put_ldev(mdev); 2084 put_ldev(mdev);
2076 } else { 2085 } else {
2077 d_size = 0; 2086 d_size = 0;
2078 u_size = 0; 2087 u_size = 0;
2079 q_order_type = QUEUE_ORDERED_NONE; 2088 q_order_type = QUEUE_ORDERED_NONE;
2089 max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
2080 } 2090 }
2081 2091
2082 p.d_size = cpu_to_be64(d_size); 2092 p.d_size = cpu_to_be64(d_size);
2083 p.u_size = cpu_to_be64(u_size); 2093 p.u_size = cpu_to_be64(u_size);
2084 p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev)); 2094 p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
2085 p.max_bio_size = cpu_to_be32(queue_max_hw_sectors(mdev->rq_queue) << 9); 2095 p.max_bio_size = cpu_to_be32(max_bio_size);
2086 p.queue_order_type = cpu_to_be16(q_order_type); 2096 p.queue_order_type = cpu_to_be16(q_order_type);
2087 p.dds_flags = cpu_to_be16(flags); 2097 p.dds_flags = cpu_to_be16(flags);
2088 2098
@@ -2722,7 +2732,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2722 2732
2723 /* double check digest, sometimes buffers have been modified in flight. */ 2733 /* double check digest, sometimes buffers have been modified in flight. */
2724 if (dgs > 0 && dgs <= 64) { 2734 if (dgs > 0 && dgs <= 64) {
2725 /* 64 byte, 512 bit, is the larges digest size 2735 /* 64 byte, 512 bit, is the largest digest size
2726 * currently supported in kernel crypto. */ 2736 * currently supported in kernel crypto. */
2727 unsigned char digest[64]; 2737 unsigned char digest[64];
2728 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest); 2738 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
@@ -3041,6 +3051,8 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
3041 mdev->agreed_pro_version = PRO_VERSION_MAX; 3051 mdev->agreed_pro_version = PRO_VERSION_MAX;
3042 mdev->write_ordering = WO_bdev_flush; 3052 mdev->write_ordering = WO_bdev_flush;
3043 mdev->resync_wenr = LC_FREE; 3053 mdev->resync_wenr = LC_FREE;
3054 mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
3055 mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
3044} 3056}
3045 3057
3046void drbd_mdev_cleanup(struct drbd_conf *mdev) 3058void drbd_mdev_cleanup(struct drbd_conf *mdev)
@@ -3275,7 +3287,7 @@ static void drbd_delete_device(unsigned int minor)
3275 3287
3276 drbd_release_ee_lists(mdev); 3288 drbd_release_ee_lists(mdev);
3277 3289
3278 /* should be free'd on disconnect? */ 3290 /* should be freed on disconnect? */
3279 kfree(mdev->ee_hash); 3291 kfree(mdev->ee_hash);
3280 /* 3292 /*
3281 mdev->ee_hash_s = 0; 3293 mdev->ee_hash_s = 0;
@@ -3415,7 +3427,9 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
3415 q->backing_dev_info.congested_data = mdev; 3427 q->backing_dev_info.congested_data = mdev;
3416 3428
3417 blk_queue_make_request(q, drbd_make_request); 3429 blk_queue_make_request(q, drbd_make_request);
3418 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE >> 9); 3430 /* Setting the max_hw_sectors to an odd value of 8kibyte here
3431 This triggers a max_bio_size message upon first attach or connect */
3432 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
3419 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); 3433 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3420 blk_queue_merge_bvec(q, drbd_merge_bvec); 3434 blk_queue_merge_bvec(q, drbd_merge_bvec);
3421 q->queue_lock = &mdev->req_lock; 3435 q->queue_lock = &mdev->req_lock;
@@ -3627,7 +3641,8 @@ struct meta_data_on_disk {
3627 /* `-- act_log->nr_elements <-- sync_conf.al_extents */ 3641 /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3628 u32 bm_offset; /* offset to the bitmap, from here */ 3642 u32 bm_offset; /* offset to the bitmap, from here */
3629 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */ 3643 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
3630 u32 reserved_u32[4]; 3644 u32 la_peer_max_bio_size; /* last peer max_bio_size */
3645 u32 reserved_u32[3];
3631 3646
3632} __packed; 3647} __packed;
3633 3648
@@ -3668,6 +3683,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
3668 buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid); 3683 buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3669 3684
3670 buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset); 3685 buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
3686 buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
3671 3687
3672 D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset); 3688 D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3673 sector = mdev->ldev->md.md_offset; 3689 sector = mdev->ldev->md.md_offset;
@@ -3751,6 +3767,15 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3751 mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents); 3767 mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3752 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid); 3768 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3753 3769
3770 spin_lock_irq(&mdev->req_lock);
3771 if (mdev->state.conn < C_CONNECTED) {
3772 int peer;
3773 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3774 peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
3775 mdev->peer_max_bio_size = peer;
3776 }
3777 spin_unlock_irq(&mdev->req_lock);
3778
3754 if (mdev->sync_conf.al_extents < 7) 3779 if (mdev->sync_conf.al_extents < 7)
3755 mdev->sync_conf.al_extents = 127; 3780 mdev->sync_conf.al_extents = 127;
3756 3781
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 03b29f78a37d..515bcd948a43 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -272,9 +272,28 @@ static int _try_outdate_peer_async(void *data)
272{ 272{
273 struct drbd_conf *mdev = (struct drbd_conf *)data; 273 struct drbd_conf *mdev = (struct drbd_conf *)data;
274 enum drbd_disk_state nps; 274 enum drbd_disk_state nps;
275 union drbd_state ns;
275 276
276 nps = drbd_try_outdate_peer(mdev); 277 nps = drbd_try_outdate_peer(mdev);
277 drbd_request_state(mdev, NS(pdsk, nps)); 278
279 /* Not using
280 drbd_request_state(mdev, NS(pdsk, nps));
281 here, because we might were able to re-establish the connection
282 in the meantime. This can only partially be solved in the state's
283 engine is_valid_state() and is_valid_state_transition()
284 functions.
285
286 nps can be D_INCONSISTENT, D_OUTDATED or D_UNKNOWN.
287 pdsk == D_INCONSISTENT while conn >= C_CONNECTED is valid,
288 therefore we have to have the pre state change check here.
289 */
290 spin_lock_irq(&mdev->req_lock);
291 ns = mdev->state;
292 if (ns.conn < C_WF_REPORT_PARAMS) {
293 ns.pdsk = nps;
294 _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
295 }
296 spin_unlock_irq(&mdev->req_lock);
278 297
279 return 0; 298 return 0;
280} 299}
@@ -577,7 +596,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
577 * Returns 0 on success, negative return values indicate errors. 596 * Returns 0 on success, negative return values indicate errors.
578 * You should call drbd_md_sync() after calling this function. 597 * You should call drbd_md_sync() after calling this function.
579 */ 598 */
580enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local) 599enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
581{ 600{
582 sector_t prev_first_sect, prev_size; /* previous meta location */ 601 sector_t prev_first_sect, prev_size; /* previous meta location */
583 sector_t la_size; 602 sector_t la_size;
@@ -773,30 +792,78 @@ static int drbd_check_al_size(struct drbd_conf *mdev)
773 return 0; 792 return 0;
774} 793}
775 794
776void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size) __must_hold(local) 795static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
777{ 796{
778 struct request_queue * const q = mdev->rq_queue; 797 struct request_queue * const q = mdev->rq_queue;
779 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; 798 int max_hw_sectors = max_bio_size >> 9;
780 int max_segments = mdev->ldev->dc.max_bio_bvecs; 799 int max_segments = 0;
781 int max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9); 800
801 if (get_ldev_if_state(mdev, D_ATTACHING)) {
802 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
803
804 max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
805 max_segments = mdev->ldev->dc.max_bio_bvecs;
806 put_ldev(mdev);
807 }
782 808
783 blk_queue_logical_block_size(q, 512); 809 blk_queue_logical_block_size(q, 512);
784 blk_queue_max_hw_sectors(q, max_hw_sectors); 810 blk_queue_max_hw_sectors(q, max_hw_sectors);
785 /* This is the workaround for "bio would need to, but cannot, be split" */ 811 /* This is the workaround for "bio would need to, but cannot, be split" */
786 blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); 812 blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
787 blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1); 813 blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
788 blk_queue_stack_limits(q, b);
789 814
790 dev_info(DEV, "max BIO size = %u\n", queue_max_hw_sectors(q) << 9); 815 if (get_ldev_if_state(mdev, D_ATTACHING)) {
816 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
817
818 blk_queue_stack_limits(q, b);
791 819
792 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { 820 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
793 dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", 821 dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
794 q->backing_dev_info.ra_pages, 822 q->backing_dev_info.ra_pages,
795 b->backing_dev_info.ra_pages); 823 b->backing_dev_info.ra_pages);
796 q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages; 824 q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
825 }
826 put_ldev(mdev);
797 } 827 }
798} 828}
799 829
830void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
831{
832 int now, new, local, peer;
833
834 now = queue_max_hw_sectors(mdev->rq_queue) << 9;
835 local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
836 peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */
837
838 if (get_ldev_if_state(mdev, D_ATTACHING)) {
839 local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
840 mdev->local_max_bio_size = local;
841 put_ldev(mdev);
842 }
843
844 /* We may ignore peer limits if the peer is modern enough.
845 Because new from 8.3.8 onwards the peer can use multiple
846 BIOs for a single peer_request */
847 if (mdev->state.conn >= C_CONNECTED) {
848 if (mdev->agreed_pro_version < 94)
849 peer = mdev->peer_max_bio_size;
850 else if (mdev->agreed_pro_version == 94)
851 peer = DRBD_MAX_SIZE_H80_PACKET;
852 else /* drbd 8.3.8 onwards */
853 peer = DRBD_MAX_BIO_SIZE;
854 }
855
856 new = min_t(int, local, peer);
857
858 if (mdev->state.role == R_PRIMARY && new < now)
859 dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);
860
861 if (new != now)
862 dev_info(DEV, "max BIO size = %u\n", new);
863
864 drbd_setup_queue_param(mdev, new);
865}
866
800/* serialize deconfig (worker exiting, doing cleanup) 867/* serialize deconfig (worker exiting, doing cleanup)
801 * and reconfig (drbdsetup disk, drbdsetup net) 868 * and reconfig (drbdsetup disk, drbdsetup net)
802 * 869 *
@@ -865,7 +932,6 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
865 struct block_device *bdev; 932 struct block_device *bdev;
866 struct lru_cache *resync_lru = NULL; 933 struct lru_cache *resync_lru = NULL;
867 union drbd_state ns, os; 934 union drbd_state ns, os;
868 unsigned int max_bio_size;
869 enum drbd_state_rv rv; 935 enum drbd_state_rv rv;
870 int cp_discovered = 0; 936 int cp_discovered = 0;
871 int logical_block_size; 937 int logical_block_size;
@@ -1117,20 +1183,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
1117 mdev->read_cnt = 0; 1183 mdev->read_cnt = 0;
1118 mdev->writ_cnt = 0; 1184 mdev->writ_cnt = 0;
1119 1185
1120 max_bio_size = DRBD_MAX_BIO_SIZE; 1186 drbd_reconsider_max_bio_size(mdev);
1121 if (mdev->state.conn == C_CONNECTED) {
1122 /* We are Primary, Connected, and now attach a new local
1123 * backing store. We must not increase the user visible maximum
1124 * bio size on this device to something the peer may not be
1125 * able to handle. */
1126 if (mdev->agreed_pro_version < 94)
1127 max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
1128 else if (mdev->agreed_pro_version == 94)
1129 max_bio_size = DRBD_MAX_SIZE_H80_PACKET;
1130 /* else: drbd 8.3.9 and later, stay with default */
1131 }
1132
1133 drbd_setup_queue_param(mdev, max_bio_size);
1134 1187
1135 /* If I am currently not R_PRIMARY, 1188 /* If I am currently not R_PRIMARY,
1136 * but meta data primary indicator is set, 1189 * but meta data primary indicator is set,
@@ -1152,7 +1205,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
1152 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND)) 1205 !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1153 set_bit(USE_DEGR_WFC_T, &mdev->flags); 1206 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1154 1207
1155 dd = drbd_determin_dev_size(mdev, 0); 1208 dd = drbd_determine_dev_size(mdev, 0);
1156 if (dd == dev_size_error) { 1209 if (dd == dev_size_error) {
1157 retcode = ERR_NOMEM_BITMAP; 1210 retcode = ERR_NOMEM_BITMAP;
1158 goto force_diskless_dec; 1211 goto force_diskless_dec;
@@ -1281,11 +1334,19 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
1281static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1334static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1282 struct drbd_nl_cfg_reply *reply) 1335 struct drbd_nl_cfg_reply *reply)
1283{ 1336{
1337 enum drbd_ret_code retcode;
1338 int ret;
1284 drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */ 1339 drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
1285 reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS)); 1340 retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
1286 if (mdev->state.disk == D_DISKLESS) 1341 /* D_FAILED will transition to DISKLESS. */
1287 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt)); 1342 ret = wait_event_interruptible(mdev->misc_wait,
1343 mdev->state.disk != D_FAILED);
1288 drbd_resume_io(mdev); 1344 drbd_resume_io(mdev);
1345 if ((int)retcode == (int)SS_IS_DISKLESS)
1346 retcode = SS_NOTHING_TO_DO;
1347 if (ret)
1348 retcode = ERR_INTR;
1349 reply->ret_code = retcode;
1289 return 0; 1350 return 0;
1290} 1351}
1291 1352
@@ -1658,7 +1719,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1658 1719
1659 mdev->ldev->dc.disk_size = (sector_t)rs.resize_size; 1720 mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
1660 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0); 1721 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
1661 dd = drbd_determin_dev_size(mdev, ddsf); 1722 dd = drbd_determine_dev_size(mdev, ddsf);
1662 drbd_md_sync(mdev); 1723 drbd_md_sync(mdev);
1663 put_ldev(mdev); 1724 put_ldev(mdev);
1664 if (dd == dev_size_error) { 1725 if (dd == dev_size_error) {
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index fd26666c0b08..25d32c5aa50a 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -333,7 +333,7 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
333 if (!page) 333 if (!page)
334 goto fail; 334 goto fail;
335 335
336 INIT_HLIST_NODE(&e->colision); 336 INIT_HLIST_NODE(&e->collision);
337 e->epoch = NULL; 337 e->epoch = NULL;
338 e->mdev = mdev; 338 e->mdev = mdev;
339 e->pages = page; 339 e->pages = page;
@@ -356,7 +356,7 @@ void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int i
356 kfree(e->digest); 356 kfree(e->digest);
357 drbd_pp_free(mdev, e->pages, is_net); 357 drbd_pp_free(mdev, e->pages, is_net);
358 D_ASSERT(atomic_read(&e->pending_bios) == 0); 358 D_ASSERT(atomic_read(&e->pending_bios) == 0);
359 D_ASSERT(hlist_unhashed(&e->colision)); 359 D_ASSERT(hlist_unhashed(&e->collision));
360 mempool_free(e, drbd_ee_mempool); 360 mempool_free(e, drbd_ee_mempool);
361} 361}
362 362
@@ -787,7 +787,7 @@ static int drbd_connect(struct drbd_conf *mdev)
787 } 787 }
788 788
789 if (sock && msock) { 789 if (sock && msock) {
790 schedule_timeout_interruptible(HZ / 10); 790 schedule_timeout_interruptible(mdev->net_conf->ping_timeo*HZ/10);
791 ok = drbd_socket_okay(mdev, &sock); 791 ok = drbd_socket_okay(mdev, &sock);
792 ok = drbd_socket_okay(mdev, &msock) && ok; 792 ok = drbd_socket_okay(mdev, &msock) && ok;
793 if (ok) 793 if (ok)
@@ -899,11 +899,6 @@ retry:
899 899
900 drbd_thread_start(&mdev->asender); 900 drbd_thread_start(&mdev->asender);
901 901
902 if (mdev->agreed_pro_version < 95 && get_ldev(mdev)) {
903 drbd_setup_queue_param(mdev, DRBD_MAX_SIZE_H80_PACKET);
904 put_ldev(mdev);
905 }
906
907 if (drbd_send_protocol(mdev) == -1) 902 if (drbd_send_protocol(mdev) == -1)
908 return -1; 903 return -1;
909 drbd_send_sync_param(mdev, &mdev->sync_conf); 904 drbd_send_sync_param(mdev, &mdev->sync_conf);
@@ -1418,7 +1413,7 @@ static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int u
1418 sector_t sector = e->sector; 1413 sector_t sector = e->sector;
1419 int ok; 1414 int ok;
1420 1415
1421 D_ASSERT(hlist_unhashed(&e->colision)); 1416 D_ASSERT(hlist_unhashed(&e->collision));
1422 1417
1423 if (likely((e->flags & EE_WAS_ERROR) == 0)) { 1418 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1424 drbd_set_in_sync(mdev, sector, e->size); 1419 drbd_set_in_sync(mdev, sector, e->size);
@@ -1487,7 +1482,7 @@ static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
1487 return false; 1482 return false;
1488 } 1483 }
1489 1484
1490 /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid 1485 /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
1491 * special casing it there for the various failure cases. 1486 * special casing it there for the various failure cases.
1492 * still no race with drbd_fail_pending_reads */ 1487 * still no race with drbd_fail_pending_reads */
1493 ok = recv_dless_read(mdev, req, sector, data_size); 1488 ok = recv_dless_read(mdev, req, sector, data_size);
@@ -1558,11 +1553,11 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1558 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */ 1553 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
1559 if (mdev->net_conf->two_primaries) { 1554 if (mdev->net_conf->two_primaries) {
1560 spin_lock_irq(&mdev->req_lock); 1555 spin_lock_irq(&mdev->req_lock);
1561 D_ASSERT(!hlist_unhashed(&e->colision)); 1556 D_ASSERT(!hlist_unhashed(&e->collision));
1562 hlist_del_init(&e->colision); 1557 hlist_del_init(&e->collision);
1563 spin_unlock_irq(&mdev->req_lock); 1558 spin_unlock_irq(&mdev->req_lock);
1564 } else { 1559 } else {
1565 D_ASSERT(hlist_unhashed(&e->colision)); 1560 D_ASSERT(hlist_unhashed(&e->collision));
1566 } 1561 }
1567 1562
1568 drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0)); 1563 drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
@@ -1579,8 +1574,8 @@ static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int u
1579 ok = drbd_send_ack(mdev, P_DISCARD_ACK, e); 1574 ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
1580 1575
1581 spin_lock_irq(&mdev->req_lock); 1576 spin_lock_irq(&mdev->req_lock);
1582 D_ASSERT(!hlist_unhashed(&e->colision)); 1577 D_ASSERT(!hlist_unhashed(&e->collision));
1583 hlist_del_init(&e->colision); 1578 hlist_del_init(&e->collision);
1584 spin_unlock_irq(&mdev->req_lock); 1579 spin_unlock_irq(&mdev->req_lock);
1585 1580
1586 dec_unacked(mdev); 1581 dec_unacked(mdev);
@@ -1755,7 +1750,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
1755 1750
1756 spin_lock_irq(&mdev->req_lock); 1751 spin_lock_irq(&mdev->req_lock);
1757 1752
1758 hlist_add_head(&e->colision, ee_hash_slot(mdev, sector)); 1753 hlist_add_head(&e->collision, ee_hash_slot(mdev, sector));
1759 1754
1760#define OVERLAPS overlaps(i->sector, i->size, sector, size) 1755#define OVERLAPS overlaps(i->sector, i->size, sector, size)
1761 slot = tl_hash_slot(mdev, sector); 1756 slot = tl_hash_slot(mdev, sector);
@@ -1765,7 +1760,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
1765 int have_conflict = 0; 1760 int have_conflict = 0;
1766 prepare_to_wait(&mdev->misc_wait, &wait, 1761 prepare_to_wait(&mdev->misc_wait, &wait,
1767 TASK_INTERRUPTIBLE); 1762 TASK_INTERRUPTIBLE);
1768 hlist_for_each_entry(i, n, slot, colision) { 1763 hlist_for_each_entry(i, n, slot, collision) {
1769 if (OVERLAPS) { 1764 if (OVERLAPS) {
1770 /* only ALERT on first iteration, 1765 /* only ALERT on first iteration,
1771 * we may be woken up early... */ 1766 * we may be woken up early... */
@@ -1804,7 +1799,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
1804 } 1799 }
1805 1800
1806 if (signal_pending(current)) { 1801 if (signal_pending(current)) {
1807 hlist_del_init(&e->colision); 1802 hlist_del_init(&e->collision);
1808 1803
1809 spin_unlock_irq(&mdev->req_lock); 1804 spin_unlock_irq(&mdev->req_lock);
1810 1805
@@ -1862,7 +1857,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
1862 dev_err(DEV, "submit failed, triggering re-connect\n"); 1857 dev_err(DEV, "submit failed, triggering re-connect\n");
1863 spin_lock_irq(&mdev->req_lock); 1858 spin_lock_irq(&mdev->req_lock);
1864 list_del(&e->w.list); 1859 list_del(&e->w.list);
1865 hlist_del_init(&e->colision); 1860 hlist_del_init(&e->collision);
1866 spin_unlock_irq(&mdev->req_lock); 1861 spin_unlock_irq(&mdev->req_lock);
1867 if (e->flags & EE_CALL_AL_COMPLETE_IO) 1862 if (e->flags & EE_CALL_AL_COMPLETE_IO)
1868 drbd_al_complete_io(mdev, e->sector); 1863 drbd_al_complete_io(mdev, e->sector);
@@ -2916,12 +2911,6 @@ disconnect:
2916 return false; 2911 return false;
2917} 2912}
2918 2913
2919static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
2920{
2921 /* sorry, we currently have no working implementation
2922 * of distributed TCQ */
2923}
2924
2925/* warn if the arguments differ by more than 12.5% */ 2914/* warn if the arguments differ by more than 12.5% */
2926static void warn_if_differ_considerably(struct drbd_conf *mdev, 2915static void warn_if_differ_considerably(struct drbd_conf *mdev,
2927 const char *s, sector_t a, sector_t b) 2916 const char *s, sector_t a, sector_t b)
@@ -2939,7 +2928,6 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
2939{ 2928{
2940 struct p_sizes *p = &mdev->data.rbuf.sizes; 2929 struct p_sizes *p = &mdev->data.rbuf.sizes;
2941 enum determine_dev_size dd = unchanged; 2930 enum determine_dev_size dd = unchanged;
2942 unsigned int max_bio_size;
2943 sector_t p_size, p_usize, my_usize; 2931 sector_t p_size, p_usize, my_usize;
2944 int ldsc = 0; /* local disk size changed */ 2932 int ldsc = 0; /* local disk size changed */
2945 enum dds_flags ddsf; 2933 enum dds_flags ddsf;
@@ -2994,7 +2982,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
2994 2982
2995 ddsf = be16_to_cpu(p->dds_flags); 2983 ddsf = be16_to_cpu(p->dds_flags);
2996 if (get_ldev(mdev)) { 2984 if (get_ldev(mdev)) {
2997 dd = drbd_determin_dev_size(mdev, ddsf); 2985 dd = drbd_determine_dev_size(mdev, ddsf);
2998 put_ldev(mdev); 2986 put_ldev(mdev);
2999 if (dd == dev_size_error) 2987 if (dd == dev_size_error)
3000 return false; 2988 return false;
@@ -3004,23 +2992,15 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
3004 drbd_set_my_capacity(mdev, p_size); 2992 drbd_set_my_capacity(mdev, p_size);
3005 } 2993 }
3006 2994
2995 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
2996 drbd_reconsider_max_bio_size(mdev);
2997
3007 if (get_ldev(mdev)) { 2998 if (get_ldev(mdev)) {
3008 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) { 2999 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3009 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); 3000 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3010 ldsc = 1; 3001 ldsc = 1;
3011 } 3002 }
3012 3003
3013 if (mdev->agreed_pro_version < 94)
3014 max_bio_size = be32_to_cpu(p->max_bio_size);
3015 else if (mdev->agreed_pro_version == 94)
3016 max_bio_size = DRBD_MAX_SIZE_H80_PACKET;
3017 else /* drbd 8.3.8 onwards */
3018 max_bio_size = DRBD_MAX_BIO_SIZE;
3019
3020 if (max_bio_size != queue_max_hw_sectors(mdev->rq_queue) << 9)
3021 drbd_setup_queue_param(mdev, max_bio_size);
3022
3023 drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
3024 put_ldev(mdev); 3004 put_ldev(mdev);
3025 } 3005 }
3026 3006
@@ -4275,7 +4255,7 @@ static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
4275 struct hlist_node *n; 4255 struct hlist_node *n;
4276 struct drbd_request *req; 4256 struct drbd_request *req;
4277 4257
4278 hlist_for_each_entry(req, n, slot, colision) { 4258 hlist_for_each_entry(req, n, slot, collision) {
4279 if ((unsigned long)req == (unsigned long)id) { 4259 if ((unsigned long)req == (unsigned long)id) {
4280 if (req->sector != sector) { 4260 if (req->sector != sector) {
4281 dev_err(DEV, "_ack_id_to_req: found req %p but it has " 4261 dev_err(DEV, "_ack_id_to_req: found req %p but it has "
@@ -4554,6 +4534,7 @@ int drbd_asender(struct drbd_thread *thi)
4554 int received = 0; 4534 int received = 0;
4555 int expect = sizeof(struct p_header80); 4535 int expect = sizeof(struct p_header80);
4556 int empty; 4536 int empty;
4537 int ping_timeout_active = 0;
4557 4538
4558 sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev)); 4539 sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
4559 4540
@@ -4566,6 +4547,7 @@ int drbd_asender(struct drbd_thread *thi)
4566 ERR_IF(!drbd_send_ping(mdev)) goto reconnect; 4547 ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
4567 mdev->meta.socket->sk->sk_rcvtimeo = 4548 mdev->meta.socket->sk->sk_rcvtimeo =
4568 mdev->net_conf->ping_timeo*HZ/10; 4549 mdev->net_conf->ping_timeo*HZ/10;
4550 ping_timeout_active = 1;
4569 } 4551 }
4570 4552
4571 /* conditionally cork; 4553 /* conditionally cork;
@@ -4620,8 +4602,7 @@ int drbd_asender(struct drbd_thread *thi)
4620 dev_err(DEV, "meta connection shut down by peer.\n"); 4602 dev_err(DEV, "meta connection shut down by peer.\n");
4621 goto reconnect; 4603 goto reconnect;
4622 } else if (rv == -EAGAIN) { 4604 } else if (rv == -EAGAIN) {
4623 if (mdev->meta.socket->sk->sk_rcvtimeo == 4605 if (ping_timeout_active) {
4624 mdev->net_conf->ping_timeo*HZ/10) {
4625 dev_err(DEV, "PingAck did not arrive in time.\n"); 4606 dev_err(DEV, "PingAck did not arrive in time.\n");
4626 goto reconnect; 4607 goto reconnect;
4627 } 4608 }
@@ -4660,6 +4641,11 @@ int drbd_asender(struct drbd_thread *thi)
4660 if (!cmd->process(mdev, h)) 4641 if (!cmd->process(mdev, h))
4661 goto reconnect; 4642 goto reconnect;
4662 4643
4644 /* the idle_timeout (ping-int)
4645 * has been restored in got_PingAck() */
4646 if (cmd == get_asender_cmd(P_PING_ACK))
4647 ping_timeout_active = 0;
4648
4663 buf = h; 4649 buf = h;
4664 received = 0; 4650 received = 0;
4665 expect = sizeof(struct p_header80); 4651 expect = sizeof(struct p_header80);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 5c0c8be1bb0a..3424d675b769 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -163,7 +163,7 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev,
163 * they must have been failed on the spot */ 163 * they must have been failed on the spot */
164#define OVERLAPS overlaps(sector, size, i->sector, i->size) 164#define OVERLAPS overlaps(sector, size, i->sector, i->size)
165 slot = tl_hash_slot(mdev, sector); 165 slot = tl_hash_slot(mdev, sector);
166 hlist_for_each_entry(i, n, slot, colision) { 166 hlist_for_each_entry(i, n, slot, collision) {
167 if (OVERLAPS) { 167 if (OVERLAPS) {
168 dev_alert(DEV, "LOGIC BUG: completed: %p %llus +%u; " 168 dev_alert(DEV, "LOGIC BUG: completed: %p %llus +%u; "
169 "other: %p %llus +%u\n", 169 "other: %p %llus +%u\n",
@@ -187,7 +187,7 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev,
187#undef OVERLAPS 187#undef OVERLAPS
188#define OVERLAPS overlaps(sector, size, e->sector, e->size) 188#define OVERLAPS overlaps(sector, size, e->sector, e->size)
189 slot = ee_hash_slot(mdev, req->sector); 189 slot = ee_hash_slot(mdev, req->sector);
190 hlist_for_each_entry(e, n, slot, colision) { 190 hlist_for_each_entry(e, n, slot, collision) {
191 if (OVERLAPS) { 191 if (OVERLAPS) {
192 wake_up(&mdev->misc_wait); 192 wake_up(&mdev->misc_wait);
193 break; 193 break;
@@ -260,8 +260,8 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
260 260
261 /* remove the request from the conflict detection 261 /* remove the request from the conflict detection
262 * respective block_id verification hash */ 262 * respective block_id verification hash */
263 if (!hlist_unhashed(&req->colision)) 263 if (!hlist_unhashed(&req->collision))
264 hlist_del(&req->colision); 264 hlist_del(&req->collision);
265 else 265 else
266 D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0); 266 D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
267 267
@@ -329,7 +329,7 @@ static int _req_conflicts(struct drbd_request *req)
329 struct hlist_node *n; 329 struct hlist_node *n;
330 struct hlist_head *slot; 330 struct hlist_head *slot;
331 331
332 D_ASSERT(hlist_unhashed(&req->colision)); 332 D_ASSERT(hlist_unhashed(&req->collision));
333 333
334 if (!get_net_conf(mdev)) 334 if (!get_net_conf(mdev))
335 return 0; 335 return 0;
@@ -341,7 +341,7 @@ static int _req_conflicts(struct drbd_request *req)
341 341
342#define OVERLAPS overlaps(i->sector, i->size, sector, size) 342#define OVERLAPS overlaps(i->sector, i->size, sector, size)
343 slot = tl_hash_slot(mdev, sector); 343 slot = tl_hash_slot(mdev, sector);
344 hlist_for_each_entry(i, n, slot, colision) { 344 hlist_for_each_entry(i, n, slot, collision) {
345 if (OVERLAPS) { 345 if (OVERLAPS) {
346 dev_alert(DEV, "%s[%u] Concurrent local write detected! " 346 dev_alert(DEV, "%s[%u] Concurrent local write detected! "
347 "[DISCARD L] new: %llus +%u; " 347 "[DISCARD L] new: %llus +%u; "
@@ -359,7 +359,7 @@ static int _req_conflicts(struct drbd_request *req)
359#undef OVERLAPS 359#undef OVERLAPS
360#define OVERLAPS overlaps(e->sector, e->size, sector, size) 360#define OVERLAPS overlaps(e->sector, e->size, sector, size)
361 slot = ee_hash_slot(mdev, sector); 361 slot = ee_hash_slot(mdev, sector);
362 hlist_for_each_entry(e, n, slot, colision) { 362 hlist_for_each_entry(e, n, slot, collision) {
363 if (OVERLAPS) { 363 if (OVERLAPS) {
364 dev_alert(DEV, "%s[%u] Concurrent remote write detected!" 364 dev_alert(DEV, "%s[%u] Concurrent remote write detected!"
365 " [DISCARD L] new: %llus +%u; " 365 " [DISCARD L] new: %llus +%u; "
@@ -491,7 +491,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
491 491
492 /* so we can verify the handle in the answer packet 492 /* so we can verify the handle in the answer packet
493 * corresponding hlist_del is in _req_may_be_done() */ 493 * corresponding hlist_del is in _req_may_be_done() */
494 hlist_add_head(&req->colision, ar_hash_slot(mdev, req->sector)); 494 hlist_add_head(&req->collision, ar_hash_slot(mdev, req->sector));
495 495
496 set_bit(UNPLUG_REMOTE, &mdev->flags); 496 set_bit(UNPLUG_REMOTE, &mdev->flags);
497 497
@@ -507,7 +507,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
507 /* assert something? */ 507 /* assert something? */
508 /* from drbd_make_request_common only */ 508 /* from drbd_make_request_common only */
509 509
510 hlist_add_head(&req->colision, tl_hash_slot(mdev, req->sector)); 510 hlist_add_head(&req->collision, tl_hash_slot(mdev, req->sector));
511 /* corresponding hlist_del is in _req_may_be_done() */ 511 /* corresponding hlist_del is in _req_may_be_done() */
512 512
513 /* NOTE 513 /* NOTE
@@ -1033,7 +1033,7 @@ fail_conflicting:
1033 err = 0; 1033 err = 0;
1034 1034
1035fail_free_complete: 1035fail_free_complete:
1036 if (rw == WRITE && local) 1036 if (req->rq_state & RQ_IN_ACT_LOG)
1037 drbd_al_complete_io(mdev, sector); 1037 drbd_al_complete_io(mdev, sector);
1038fail_and_free_req: 1038fail_and_free_req:
1039 if (local) { 1039 if (local) {
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 32e2c3e6a813..68a234a5fdc5 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -256,7 +256,7 @@ static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev,
256 struct hlist_node *n; 256 struct hlist_node *n;
257 struct drbd_request *req; 257 struct drbd_request *req;
258 258
259 hlist_for_each_entry(req, n, slot, colision) { 259 hlist_for_each_entry(req, n, slot, collision) {
260 if ((unsigned long)req == (unsigned long)id) { 260 if ((unsigned long)req == (unsigned long)id) {
261 D_ASSERT(req->sector == sector); 261 D_ASSERT(req->sector == sector);
262 return req; 262 return req;
@@ -291,7 +291,7 @@ static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
291 req->epoch = 0; 291 req->epoch = 0;
292 req->sector = bio_src->bi_sector; 292 req->sector = bio_src->bi_sector;
293 req->size = bio_src->bi_size; 293 req->size = bio_src->bi_size;
294 INIT_HLIST_NODE(&req->colision); 294 INIT_HLIST_NODE(&req->collision);
295 INIT_LIST_HEAD(&req->tl_requests); 295 INIT_LIST_HEAD(&req->tl_requests);
296 INIT_LIST_HEAD(&req->w.list); 296 INIT_LIST_HEAD(&req->w.list);
297 } 297 }
@@ -323,6 +323,7 @@ extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
323extern void complete_master_bio(struct drbd_conf *mdev, 323extern void complete_master_bio(struct drbd_conf *mdev,
324 struct bio_and_error *m); 324 struct bio_and_error *m);
325extern void request_timer_fn(unsigned long data); 325extern void request_timer_fn(unsigned long data);
326extern void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what);
326 327
327/* use this if you don't want to deal with calling complete_master_bio() 328/* use this if you don't want to deal with calling complete_master_bio()
328 * outside the spinlock, e.g. when walking some list on cleanup. */ 329 * outside the spinlock, e.g. when walking some list on cleanup. */
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index f7e6c92f8d03..4d76b06b6b20 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -126,7 +126,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
126 list_del(&e->w.list); /* has been on active_ee or sync_ee */ 126 list_del(&e->w.list); /* has been on active_ee or sync_ee */
127 list_add_tail(&e->w.list, &mdev->done_ee); 127 list_add_tail(&e->w.list, &mdev->done_ee);
128 128
129 /* No hlist_del_init(&e->colision) here, we did not send the Ack yet, 129 /* No hlist_del_init(&e->collision) here, we did not send the Ack yet,
130 * neither did we wake possibly waiting conflicting requests. 130 * neither did we wake possibly waiting conflicting requests.
131 * done from "drbd_process_done_ee" within the appropriate w.cb 131 * done from "drbd_process_done_ee" within the appropriate w.cb
132 * (e_end_block/e_end_resync_block) or from _drbd_clear_done_ee */ 132 * (e_end_block/e_end_resync_block) or from _drbd_clear_done_ee */
@@ -297,42 +297,48 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
297 crypto_hash_final(&desc, digest); 297 crypto_hash_final(&desc, digest);
298} 298}
299 299
300static int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel) 300/* TODO merge common code with w_e_end_ov_req */
301int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
301{ 302{
302 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w); 303 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
303 int digest_size; 304 int digest_size;
304 void *digest; 305 void *digest;
305 int ok; 306 int ok = 1;
306 307
307 D_ASSERT(e->block_id == DRBD_MAGIC + 0xbeef); 308 D_ASSERT(e->block_id == DRBD_MAGIC + 0xbeef);
308 309
309 if (unlikely(cancel)) { 310 if (unlikely(cancel))
310 drbd_free_ee(mdev, e); 311 goto out;
311 return 1;
312 }
313 312
314 if (likely((e->flags & EE_WAS_ERROR) == 0)) { 313 if (likely((e->flags & EE_WAS_ERROR) != 0))
315 digest_size = crypto_hash_digestsize(mdev->csums_tfm); 314 goto out;
316 digest = kmalloc(digest_size, GFP_NOIO);
317 if (digest) {
318 drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
319 315
320 inc_rs_pending(mdev); 316 digest_size = crypto_hash_digestsize(mdev->csums_tfm);
321 ok = drbd_send_drequest_csum(mdev, 317 digest = kmalloc(digest_size, GFP_NOIO);
322 e->sector, 318 if (digest) {
323 e->size, 319 sector_t sector = e->sector;
324 digest, 320 unsigned int size = e->size;
325 digest_size, 321 drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
326 P_CSUM_RS_REQUEST); 322 /* Free e and pages before send.
327 kfree(digest); 323 * In case we block on congestion, we could otherwise run into
328 } else { 324 * some distributed deadlock, if the other side blocks on
329 dev_err(DEV, "kmalloc() of digest failed.\n"); 325 * congestion as well, because our receiver blocks in
330 ok = 0; 326 * drbd_pp_alloc due to pp_in_use > max_buffers. */
331 } 327 drbd_free_ee(mdev, e);
332 } else 328 e = NULL;
333 ok = 1; 329 inc_rs_pending(mdev);
330 ok = drbd_send_drequest_csum(mdev, sector, size,
331 digest, digest_size,
332 P_CSUM_RS_REQUEST);
333 kfree(digest);
334 } else {
335 dev_err(DEV, "kmalloc() of digest failed.\n");
336 ok = 0;
337 }
334 338
335 drbd_free_ee(mdev, e); 339out:
340 if (e)
341 drbd_free_ee(mdev, e);
336 342
337 if (unlikely(!ok)) 343 if (unlikely(!ok))
338 dev_err(DEV, "drbd_send_drequest(..., csum) failed\n"); 344 dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
@@ -834,7 +840,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
834 const int ratio = 840 const int ratio =
835 (t == 0) ? 0 : 841 (t == 0) ? 0 :
836 (t < 100000) ? ((s*100)/t) : (s/(t/100)); 842 (t < 100000) ? ((s*100)/t) : (s/(t/100));
837 dev_info(DEV, "%u %% had equal check sums, eliminated: %luK; " 843 dev_info(DEV, "%u %% had equal checksums, eliminated: %luK; "
838 "transferred %luK total %luK\n", 844 "transferred %luK total %luK\n",
839 ratio, 845 ratio,
840 Bit2KB(mdev->rs_same_csum), 846 Bit2KB(mdev->rs_same_csum),
@@ -1071,9 +1077,12 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1071 return ok; 1077 return ok;
1072} 1078}
1073 1079
1080/* TODO merge common code with w_e_send_csum */
1074int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) 1081int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1075{ 1082{
1076 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w); 1083 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
1084 sector_t sector = e->sector;
1085 unsigned int size = e->size;
1077 int digest_size; 1086 int digest_size;
1078 void *digest; 1087 void *digest;
1079 int ok = 1; 1088 int ok = 1;
@@ -1093,17 +1102,25 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1093 else 1102 else
1094 memset(digest, 0, digest_size); 1103 memset(digest, 0, digest_size);
1095 1104
1105 /* Free e and pages before send.
1106 * In case we block on congestion, we could otherwise run into
1107 * some distributed deadlock, if the other side blocks on
1108 * congestion as well, because our receiver blocks in
1109 * drbd_pp_alloc due to pp_in_use > max_buffers. */
1110 drbd_free_ee(mdev, e);
1111 e = NULL;
1096 inc_rs_pending(mdev); 1112 inc_rs_pending(mdev);
1097 ok = drbd_send_drequest_csum(mdev, e->sector, e->size, 1113 ok = drbd_send_drequest_csum(mdev, sector, size,
1098 digest, digest_size, P_OV_REPLY); 1114 digest, digest_size,
1115 P_OV_REPLY);
1099 if (!ok) 1116 if (!ok)
1100 dec_rs_pending(mdev); 1117 dec_rs_pending(mdev);
1101 kfree(digest); 1118 kfree(digest);
1102 1119
1103out: 1120out:
1104 drbd_free_ee(mdev, e); 1121 if (e)
1122 drbd_free_ee(mdev, e);
1105 dec_unacked(mdev); 1123 dec_unacked(mdev);
1106
1107 return ok; 1124 return ok;
1108} 1125}
1109 1126
@@ -1122,8 +1139,10 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1122{ 1139{
1123 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w); 1140 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
1124 struct digest_info *di; 1141 struct digest_info *di;
1125 int digest_size;
1126 void *digest; 1142 void *digest;
1143 sector_t sector = e->sector;
1144 unsigned int size = e->size;
1145 int digest_size;
1127 int ok, eq = 0; 1146 int ok, eq = 0;
1128 1147
1129 if (unlikely(cancel)) { 1148 if (unlikely(cancel)) {
@@ -1153,16 +1172,21 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1153 } 1172 }
1154 } 1173 }
1155 1174
1156 dec_unacked(mdev); 1175 /* Free e and pages before send.
1176 * In case we block on congestion, we could otherwise run into
1177 * some distributed deadlock, if the other side blocks on
1178 * congestion as well, because our receiver blocks in
1179 * drbd_pp_alloc due to pp_in_use > max_buffers. */
1180 drbd_free_ee(mdev, e);
1157 if (!eq) 1181 if (!eq)
1158 drbd_ov_oos_found(mdev, e->sector, e->size); 1182 drbd_ov_oos_found(mdev, sector, size);
1159 else 1183 else
1160 ov_oos_print(mdev); 1184 ov_oos_print(mdev);
1161 1185
1162 ok = drbd_send_ack_ex(mdev, P_OV_RESULT, e->sector, e->size, 1186 ok = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size,
1163 eq ? ID_IN_SYNC : ID_OUT_OF_SYNC); 1187 eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
1164 1188
1165 drbd_free_ee(mdev, e); 1189 dec_unacked(mdev);
1166 1190
1167 --mdev->ov_left; 1191 --mdev->ov_left;
1168 1192
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index a076a14ca72d..c59a672a3de0 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1658,7 +1658,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
1658 struct kobject *kobj; 1658 struct kobject *kobj;
1659 1659
1660 mutex_lock(&loop_devices_mutex); 1660 mutex_lock(&loop_devices_mutex);
1661 lo = loop_init_one(dev & MINORMASK); 1661 lo = loop_init_one(MINOR(dev) >> part_shift);
1662 kobj = lo ? get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM); 1662 kobj = lo ? get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM);
1663 mutex_unlock(&loop_devices_mutex); 1663 mutex_unlock(&loop_devices_mutex);
1664 1664
@@ -1691,15 +1691,18 @@ static int __init loop_init(void)
1691 if (max_part > 0) 1691 if (max_part > 0)
1692 part_shift = fls(max_part); 1692 part_shift = fls(max_part);
1693 1693
1694 if ((1UL << part_shift) > DISK_MAX_PARTS)
1695 return -EINVAL;
1696
1694 if (max_loop > 1UL << (MINORBITS - part_shift)) 1697 if (max_loop > 1UL << (MINORBITS - part_shift))
1695 return -EINVAL; 1698 return -EINVAL;
1696 1699
1697 if (max_loop) { 1700 if (max_loop) {
1698 nr = max_loop; 1701 nr = max_loop;
1699 range = max_loop; 1702 range = max_loop << part_shift;
1700 } else { 1703 } else {
1701 nr = 8; 1704 nr = 8;
1702 range = 1UL << (MINORBITS - part_shift); 1705 range = 1UL << MINORBITS;
1703 } 1706 }
1704 1707
1705 if (register_blkdev(LOOP_MAJOR, "loop")) 1708 if (register_blkdev(LOOP_MAJOR, "loop"))
@@ -1738,7 +1741,7 @@ static void __exit loop_exit(void)
1738 unsigned long range; 1741 unsigned long range;
1739 struct loop_device *lo, *next; 1742 struct loop_device *lo, *next;
1740 1743
1741 range = max_loop ? max_loop : 1UL << (MINORBITS - part_shift); 1744 range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
1742 1745
1743 list_for_each_entry_safe(lo, next, &loop_devices, lo_list) 1746 list_for_each_entry_safe(lo, next, &loop_devices, lo_list)
1744 loop_del_one(lo); 1747 loop_del_one(lo);
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 8690e31d9932..a0aabd904a51 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -320,6 +320,8 @@ static void pcd_init_units(void)
320 disk->first_minor = unit; 320 disk->first_minor = unit;
321 strcpy(disk->disk_name, cd->name); /* umm... */ 321 strcpy(disk->disk_name, cd->name); /* umm... */
322 disk->fops = &pcd_bdops; 322 disk->fops = &pcd_bdops;
323 disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
324 disk->events = DISK_EVENT_MEDIA_CHANGE;
323 } 325 }
324} 326}
325 327
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 9712fad82bc6..1278098624e6 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1191,14 +1191,19 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev,
1191static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) 1191static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
1192{ 1192{
1193 struct rbd_device *dev = (struct rbd_device *)data; 1193 struct rbd_device *dev = (struct rbd_device *)data;
1194 int rc;
1195
1194 if (!dev) 1196 if (!dev)
1195 return; 1197 return;
1196 1198
1197 dout("rbd_watch_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name, 1199 dout("rbd_watch_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name,
1198 notify_id, (int)opcode); 1200 notify_id, (int)opcode);
1199 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); 1201 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1200 __rbd_update_snaps(dev); 1202 rc = __rbd_update_snaps(dev);
1201 mutex_unlock(&ctl_mutex); 1203 mutex_unlock(&ctl_mutex);
1204 if (rc)
1205 pr_warning(DRV_NAME "%d got notification but failed to update"
1206 " snaps: %d\n", dev->major, rc);
1202 1207
1203 rbd_req_sync_notify_ack(dev, ver, notify_id, dev->obj_md_name); 1208 rbd_req_sync_notify_ack(dev, ver, notify_id, dev->obj_md_name);
1204} 1209}
@@ -1597,7 +1602,7 @@ static int rbd_header_add_snap(struct rbd_device *dev,
1597 int name_len = strlen(snap_name); 1602 int name_len = strlen(snap_name);
1598 u64 new_snapid; 1603 u64 new_snapid;
1599 int ret; 1604 int ret;
1600 void *data, *data_start, *data_end; 1605 void *data, *p, *e;
1601 u64 ver; 1606 u64 ver;
1602 1607
1603 /* we should create a snapshot only if we're pointing at the head */ 1608 /* we should create a snapshot only if we're pointing at the head */
@@ -1614,16 +1619,16 @@ static int rbd_header_add_snap(struct rbd_device *dev,
1614 if (!data) 1619 if (!data)
1615 return -ENOMEM; 1620 return -ENOMEM;
1616 1621
1617 data_start = data; 1622 p = data;
1618 data_end = data + name_len + 16; 1623 e = data + name_len + 16;
1619 1624
1620 ceph_encode_string_safe(&data, data_end, snap_name, name_len, bad); 1625 ceph_encode_string_safe(&p, e, snap_name, name_len, bad);
1621 ceph_encode_64_safe(&data, data_end, new_snapid, bad); 1626 ceph_encode_64_safe(&p, e, new_snapid, bad);
1622 1627
1623 ret = rbd_req_sync_exec(dev, dev->obj_md_name, "rbd", "snap_add", 1628 ret = rbd_req_sync_exec(dev, dev->obj_md_name, "rbd", "snap_add",
1624 data_start, data - data_start, &ver); 1629 data, p - data, &ver);
1625 1630
1626 kfree(data_start); 1631 kfree(data);
1627 1632
1628 if (ret < 0) 1633 if (ret < 0)
1629 return ret; 1634 return ret;
@@ -1659,6 +1664,9 @@ static int __rbd_update_snaps(struct rbd_device *rbd_dev)
1659 if (ret < 0) 1664 if (ret < 0)
1660 return ret; 1665 return ret;
1661 1666
1667 /* resized? */
1668 set_capacity(rbd_dev->disk, h.image_size / 512ULL);
1669
1662 down_write(&rbd_dev->header.snap_rwsem); 1670 down_write(&rbd_dev->header.snap_rwsem);
1663 1671
1664 snap_seq = rbd_dev->header.snapc->seq; 1672 snap_seq = rbd_dev->header.snapc->seq;
@@ -1716,7 +1724,8 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
1716 if (!disk) 1724 if (!disk)
1717 goto out; 1725 goto out;
1718 1726
1719 sprintf(disk->disk_name, DRV_NAME "%d", rbd_dev->id); 1727 snprintf(disk->disk_name, sizeof(disk->disk_name), DRV_NAME "%d",
1728 rbd_dev->id);
1720 disk->major = rbd_dev->major; 1729 disk->major = rbd_dev->major;
1721 disk->first_minor = 0; 1730 disk->first_minor = 0;
1722 disk->fops = &rbd_bd_ops; 1731 disk->fops = &rbd_bd_ops;
diff --git a/drivers/block/xen-blkback/Makefile b/drivers/block/xen-blkback/Makefile
new file mode 100644
index 000000000000..e491c1b76878
--- /dev/null
+++ b/drivers/block/xen-blkback/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_XEN_BLKDEV_BACKEND) := xen-blkback.o
2
3xen-blkback-y := blkback.o xenbus.o
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
new file mode 100644
index 000000000000..c73910cc28c9
--- /dev/null
+++ b/drivers/block/xen-blkback/blkback.c
@@ -0,0 +1,824 @@
1/******************************************************************************
2 *
3 * Back-end of the driver for virtual block devices. This portion of the
4 * driver exports a 'unified' block-device interface that can be accessed
5 * by any operating system that implements a compatible front end. A
6 * reference front-end implementation can be found in:
7 * drivers/block/xen-blkfront.c
8 *
9 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
10 * Copyright (c) 2005, Christopher Clark
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License version 2
14 * as published by the Free Software Foundation; or, when distributed
15 * separately from the Linux kernel or incorporated into other
16 * software packages, subject to the following license:
17 *
18 * Permission is hereby granted, free of charge, to any person obtaining a copy
19 * of this source file (the "Software"), to deal in the Software without
20 * restriction, including without limitation the rights to use, copy, modify,
21 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
22 * and to permit persons to whom the Software is furnished to do so, subject to
23 * the following conditions:
24 *
25 * The above copyright notice and this permission notice shall be included in
26 * all copies or substantial portions of the Software.
27 *
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
31 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
32 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
33 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
34 * IN THE SOFTWARE.
35 */
36
37#include <linux/spinlock.h>
38#include <linux/kthread.h>
39#include <linux/list.h>
40#include <linux/delay.h>
41#include <linux/freezer.h>
42
43#include <xen/events.h>
44#include <xen/page.h>
45#include <asm/xen/hypervisor.h>
46#include <asm/xen/hypercall.h>
47#include "common.h"
48
49/*
50 * These are rather arbitrary. They are fairly large because adjacent requests
51 * pulled from a communication ring are quite likely to end up being part of
52 * the same scatter/gather request at the disc.
53 *
54 * ** TRY INCREASING 'xen_blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
55 *
56 * This will increase the chances of being able to write whole tracks.
57 * 64 should be enough to keep us competitive with Linux.
58 */
59static int xen_blkif_reqs = 64;
60module_param_named(reqs, xen_blkif_reqs, int, 0);
61MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
62
63/* Run-time switchable: /sys/module/blkback/parameters/ */
64static unsigned int log_stats;
65module_param(log_stats, int, 0644);
66
67/*
68 * Each outstanding request that we've passed to the lower device layers has a
69 * 'pending_req' allocated to it. Each buffer_head that completes decrements
70 * the pendcnt towards zero. When it hits zero, the specified domain has a
71 * response queued for it, with the saved 'id' passed back.
72 */
73struct pending_req {
74 struct xen_blkif *blkif;
75 u64 id;
76 int nr_pages;
77 atomic_t pendcnt;
78 unsigned short operation;
79 int status;
80 struct list_head free_list;
81};
82
83#define BLKBACK_INVALID_HANDLE (~0)
84
85struct xen_blkbk {
86 struct pending_req *pending_reqs;
87 /* List of all 'pending_req' available */
88 struct list_head pending_free;
89 /* And its spinlock. */
90 spinlock_t pending_free_lock;
91 wait_queue_head_t pending_free_wq;
92 /* The list of all pages that are available. */
93 struct page **pending_pages;
94 /* And the grant handles that are available. */
95 grant_handle_t *pending_grant_handles;
96};
97
98static struct xen_blkbk *blkbk;
99
100/*
101 * Little helpful macro to figure out the index and virtual address of the
102 * pending_pages[..]. For each 'pending_req' we have have up to
103 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
104 * 10 and would index in the pending_pages[..].
105 */
106static inline int vaddr_pagenr(struct pending_req *req, int seg)
107{
108 return (req - blkbk->pending_reqs) *
109 BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
110}
111
112#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]
113
114static inline unsigned long vaddr(struct pending_req *req, int seg)
115{
116 unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
117 return (unsigned long)pfn_to_kaddr(pfn);
118}
119
120#define pending_handle(_req, _seg) \
121 (blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])
122
123
124static int do_block_io_op(struct xen_blkif *blkif);
125static int dispatch_rw_block_io(struct xen_blkif *blkif,
126 struct blkif_request *req,
127 struct pending_req *pending_req);
128static void make_response(struct xen_blkif *blkif, u64 id,
129 unsigned short op, int st);
130
131/*
132 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
133 */
134static struct pending_req *alloc_req(void)
135{
136 struct pending_req *req = NULL;
137 unsigned long flags;
138
139 spin_lock_irqsave(&blkbk->pending_free_lock, flags);
140 if (!list_empty(&blkbk->pending_free)) {
141 req = list_entry(blkbk->pending_free.next, struct pending_req,
142 free_list);
143 list_del(&req->free_list);
144 }
145 spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
146 return req;
147}
148
149/*
150 * Return the 'pending_req' structure back to the freepool. We also
151 * wake up the thread if it was waiting for a free page.
152 */
153static void free_req(struct pending_req *req)
154{
155 unsigned long flags;
156 int was_empty;
157
158 spin_lock_irqsave(&blkbk->pending_free_lock, flags);
159 was_empty = list_empty(&blkbk->pending_free);
160 list_add(&req->free_list, &blkbk->pending_free);
161 spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
162 if (was_empty)
163 wake_up(&blkbk->pending_free_wq);
164}
165
166/*
167 * Routines for managing virtual block devices (vbds).
168 */
169static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
170 int operation)
171{
172 struct xen_vbd *vbd = &blkif->vbd;
173 int rc = -EACCES;
174
175 if ((operation != READ) && vbd->readonly)
176 goto out;
177
178 if (likely(req->nr_sects)) {
179 blkif_sector_t end = req->sector_number + req->nr_sects;
180
181 if (unlikely(end < req->sector_number))
182 goto out;
183 if (unlikely(end > vbd_sz(vbd)))
184 goto out;
185 }
186
187 req->dev = vbd->pdevice;
188 req->bdev = vbd->bdev;
189 rc = 0;
190
191 out:
192 return rc;
193}
194
195static void xen_vbd_resize(struct xen_blkif *blkif)
196{
197 struct xen_vbd *vbd = &blkif->vbd;
198 struct xenbus_transaction xbt;
199 int err;
200 struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
201 unsigned long long new_size = vbd_sz(vbd);
202
203 pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
204 blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
205 pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
206 vbd->size = new_size;
207again:
208 err = xenbus_transaction_start(&xbt);
209 if (err) {
210 pr_warn(DRV_PFX "Error starting transaction");
211 return;
212 }
213 err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
214 (unsigned long long)vbd_sz(vbd));
215 if (err) {
216 pr_warn(DRV_PFX "Error writing new size");
217 goto abort;
218 }
219 /*
220 * Write the current state; we will use this to synchronize
221 * the front-end. If the current state is "connected" the
222 * front-end will get the new size information online.
223 */
224 err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
225 if (err) {
226 pr_warn(DRV_PFX "Error writing the state");
227 goto abort;
228 }
229
230 err = xenbus_transaction_end(xbt, 0);
231 if (err == -EAGAIN)
232 goto again;
233 if (err)
234 pr_warn(DRV_PFX "Error ending transaction");
235 return;
236abort:
237 xenbus_transaction_end(xbt, 1);
238}
239
240/*
241 * Notification from the guest OS.
242 */
243static void blkif_notify_work(struct xen_blkif *blkif)
244{
245 blkif->waiting_reqs = 1;
246 wake_up(&blkif->wq);
247}
248
249irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
250{
251 blkif_notify_work(dev_id);
252 return IRQ_HANDLED;
253}
254
255/*
256 * SCHEDULER FUNCTIONS
257 */
258
259static void print_stats(struct xen_blkif *blkif)
260{
261 pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d\n",
262 current->comm, blkif->st_oo_req,
263 blkif->st_rd_req, blkif->st_wr_req, blkif->st_f_req);
264 blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
265 blkif->st_rd_req = 0;
266 blkif->st_wr_req = 0;
267 blkif->st_oo_req = 0;
268}
269
270int xen_blkif_schedule(void *arg)
271{
272 struct xen_blkif *blkif = arg;
273 struct xen_vbd *vbd = &blkif->vbd;
274
275 xen_blkif_get(blkif);
276
277 while (!kthread_should_stop()) {
278 if (try_to_freeze())
279 continue;
280 if (unlikely(vbd->size != vbd_sz(vbd)))
281 xen_vbd_resize(blkif);
282
283 wait_event_interruptible(
284 blkif->wq,
285 blkif->waiting_reqs || kthread_should_stop());
286 wait_event_interruptible(
287 blkbk->pending_free_wq,
288 !list_empty(&blkbk->pending_free) ||
289 kthread_should_stop());
290
291 blkif->waiting_reqs = 0;
292 smp_mb(); /* clear flag *before* checking for work */
293
294 if (do_block_io_op(blkif))
295 blkif->waiting_reqs = 1;
296
297 if (log_stats && time_after(jiffies, blkif->st_print))
298 print_stats(blkif);
299 }
300
301 if (log_stats)
302 print_stats(blkif);
303
304 blkif->xenblkd = NULL;
305 xen_blkif_put(blkif);
306
307 return 0;
308}
309
310struct seg_buf {
311 unsigned long buf;
312 unsigned int nsec;
313};
314/*
315 * Unmap the grant references, and also remove the M2P over-rides
316 * used in the 'pending_req'.
317 */
318static void xen_blkbk_unmap(struct pending_req *req)
319{
320 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
321 unsigned int i, invcount = 0;
322 grant_handle_t handle;
323 int ret;
324
325 for (i = 0; i < req->nr_pages; i++) {
326 handle = pending_handle(req, i);
327 if (handle == BLKBACK_INVALID_HANDLE)
328 continue;
329 gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
330 GNTMAP_host_map, handle);
331 pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
332 invcount++;
333 }
334
335 ret = HYPERVISOR_grant_table_op(
336 GNTTABOP_unmap_grant_ref, unmap, invcount);
337 BUG_ON(ret);
338 /*
339 * Note, we use invcount, so nr->pages, so we can't index
340 * using vaddr(req, i).
341 */
342 for (i = 0; i < invcount; i++) {
343 ret = m2p_remove_override(
344 virt_to_page(unmap[i].host_addr), false);
345 if (ret) {
346 pr_alert(DRV_PFX "Failed to remove M2P override for %lx\n",
347 (unsigned long)unmap[i].host_addr);
348 continue;
349 }
350 }
351}
352
353static int xen_blkbk_map(struct blkif_request *req,
354 struct pending_req *pending_req,
355 struct seg_buf seg[])
356{
357 struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
358 int i;
359 int nseg = req->nr_segments;
360 int ret = 0;
361
362 /*
363 * Fill out preq.nr_sects with proper amount of sectors, and setup
364 * assign map[..] with the PFN of the page in our domain with the
365 * corresponding grant reference for each page.
366 */
367 for (i = 0; i < nseg; i++) {
368 uint32_t flags;
369
370 flags = GNTMAP_host_map;
371 if (pending_req->operation != BLKIF_OP_READ)
372 flags |= GNTMAP_readonly;
373 gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
374 req->u.rw.seg[i].gref,
375 pending_req->blkif->domid);
376 }
377
378 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
379 BUG_ON(ret);
380
381 /*
382 * Now swizzle the MFN in our domain with the MFN from the other domain
383 * so that when we access vaddr(pending_req,i) it has the contents of
384 * the page from the other domain.
385 */
386 for (i = 0; i < nseg; i++) {
387 if (unlikely(map[i].status != 0)) {
388 pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
389 map[i].handle = BLKBACK_INVALID_HANDLE;
390 ret |= 1;
391 }
392
393 pending_handle(pending_req, i) = map[i].handle;
394
395 if (ret)
396 continue;
397
398 ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
399 blkbk->pending_page(pending_req, i), false);
400 if (ret) {
401 pr_alert(DRV_PFX "Failed to install M2P override for %lx (ret: %d)\n",
402 (unsigned long)map[i].dev_bus_addr, ret);
403 /* We could switch over to GNTTABOP_copy */
404 continue;
405 }
406
407 seg[i].buf = map[i].dev_bus_addr |
408 (req->u.rw.seg[i].first_sect << 9);
409 }
410 return ret;
411}
412
413/*
414 * Completion callback on the bio's. Called as bh->b_end_io()
415 */
416
417static void __end_block_io_op(struct pending_req *pending_req, int error)
418{
419 /* An error fails the entire request. */
420 if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
421 (error == -EOPNOTSUPP)) {
422 pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
423 xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
424 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
425 } else if (error) {
426 pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
427 " error=%d\n", error);
428 pending_req->status = BLKIF_RSP_ERROR;
429 }
430
431 /*
432 * If all of the bio's have completed it is time to unmap
433 * the grant references associated with 'request' and provide
434 * the proper response on the ring.
435 */
436 if (atomic_dec_and_test(&pending_req->pendcnt)) {
437 xen_blkbk_unmap(pending_req);
438 make_response(pending_req->blkif, pending_req->id,
439 pending_req->operation, pending_req->status);
440 xen_blkif_put(pending_req->blkif);
441 free_req(pending_req);
442 }
443}
444
445/*
446 * bio callback.
447 */
448static void end_block_io_op(struct bio *bio, int error)
449{
450 __end_block_io_op(bio->bi_private, error);
451 bio_put(bio);
452}
453
454
455
456/*
457 * Function to copy the from the ring buffer the 'struct blkif_request'
458 * (which has the sectors we want, number of them, grant references, etc),
459 * and transmute it to the block API to hand it over to the proper block disk.
460 */
461static int do_block_io_op(struct xen_blkif *blkif)
462{
463 union blkif_back_rings *blk_rings = &blkif->blk_rings;
464 struct blkif_request req;
465 struct pending_req *pending_req;
466 RING_IDX rc, rp;
467 int more_to_do = 0;
468
469 rc = blk_rings->common.req_cons;
470 rp = blk_rings->common.sring->req_prod;
471 rmb(); /* Ensure we see queued requests up to 'rp'. */
472
473 while (rc != rp) {
474
475 if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
476 break;
477
478 if (kthread_should_stop()) {
479 more_to_do = 1;
480 break;
481 }
482
483 pending_req = alloc_req();
484 if (NULL == pending_req) {
485 blkif->st_oo_req++;
486 more_to_do = 1;
487 break;
488 }
489
490 switch (blkif->blk_protocol) {
491 case BLKIF_PROTOCOL_NATIVE:
492 memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
493 break;
494 case BLKIF_PROTOCOL_X86_32:
495 blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
496 break;
497 case BLKIF_PROTOCOL_X86_64:
498 blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
499 break;
500 default:
501 BUG();
502 }
503 blk_rings->common.req_cons = ++rc; /* before make_response() */
504
505 /* Apply all sanity checks to /private copy/ of request. */
506 barrier();
507
508 if (dispatch_rw_block_io(blkif, &req, pending_req))
509 break;
510
511 /* Yield point for this unbounded loop. */
512 cond_resched();
513 }
514
515 return more_to_do;
516}
517
518/*
519 * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
520 * and call the 'submit_bio' to pass it to the underlying storage.
521 */
522static int dispatch_rw_block_io(struct xen_blkif *blkif,
523 struct blkif_request *req,
524 struct pending_req *pending_req)
525{
526 struct phys_req preq;
527 struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
528 unsigned int nseg;
529 struct bio *bio = NULL;
530 struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
531 int i, nbio = 0;
532 int operation;
533 struct blk_plug plug;
534
535 switch (req->operation) {
536 case BLKIF_OP_READ:
537 blkif->st_rd_req++;
538 operation = READ;
539 break;
540 case BLKIF_OP_WRITE:
541 blkif->st_wr_req++;
542 operation = WRITE_ODIRECT;
543 break;
544 case BLKIF_OP_FLUSH_DISKCACHE:
545 blkif->st_f_req++;
546 operation = WRITE_FLUSH;
547 break;
548 case BLKIF_OP_WRITE_BARRIER:
549 default:
550 operation = 0; /* make gcc happy */
551 goto fail_response;
552 break;
553 }
554
555 /* Check that the number of segments is sane. */
556 nseg = req->nr_segments;
557 if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
558 unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
559 pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
560 nseg);
561 /* Haven't submitted any bio's yet. */
562 goto fail_response;
563 }
564
565 preq.dev = req->handle;
566 preq.sector_number = req->u.rw.sector_number;
567 preq.nr_sects = 0;
568
569 pending_req->blkif = blkif;
570 pending_req->id = req->id;
571 pending_req->operation = req->operation;
572 pending_req->status = BLKIF_RSP_OKAY;
573 pending_req->nr_pages = nseg;
574
575 for (i = 0; i < nseg; i++) {
576 seg[i].nsec = req->u.rw.seg[i].last_sect -
577 req->u.rw.seg[i].first_sect + 1;
578 if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
579 (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
580 goto fail_response;
581 preq.nr_sects += seg[i].nsec;
582
583 }
584
585 if (xen_vbd_translate(&preq, blkif, operation) != 0) {
586 pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
587 operation == READ ? "read" : "write",
588 preq.sector_number,
589 preq.sector_number + preq.nr_sects, preq.dev);
590 goto fail_response;
591 }
592
593 /*
594 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
595 * is set there.
596 */
597 for (i = 0; i < nseg; i++) {
598 if (((int)preq.sector_number|(int)seg[i].nsec) &
599 ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
600 pr_debug(DRV_PFX "Misaligned I/O request from domain %d",
601 blkif->domid);
602 goto fail_response;
603 }
604 }
605
606 /*
607 * If we have failed at this point, we need to undo the M2P override,
608 * set gnttab_set_unmap_op on all of the grant references and perform
609 * the hypercall to unmap the grants - that is all done in
610 * xen_blkbk_unmap.
611 */
612 if (xen_blkbk_map(req, pending_req, seg))
613 goto fail_flush;
614
615 /* This corresponding xen_blkif_put is done in __end_block_io_op */
616 xen_blkif_get(blkif);
617
618 for (i = 0; i < nseg; i++) {
619 while ((bio == NULL) ||
620 (bio_add_page(bio,
621 blkbk->pending_page(pending_req, i),
622 seg[i].nsec << 9,
623 seg[i].buf & ~PAGE_MASK) == 0)) {
624
625 bio = bio_alloc(GFP_KERNEL, nseg-i);
626 if (unlikely(bio == NULL))
627 goto fail_put_bio;
628
629 biolist[nbio++] = bio;
630 bio->bi_bdev = preq.bdev;
631 bio->bi_private = pending_req;
632 bio->bi_end_io = end_block_io_op;
633 bio->bi_sector = preq.sector_number;
634 }
635
636 preq.sector_number += seg[i].nsec;
637 }
638
639 /* This will be hit if the operation was a flush. */
640 if (!bio) {
641 BUG_ON(operation != WRITE_FLUSH);
642
643 bio = bio_alloc(GFP_KERNEL, 0);
644 if (unlikely(bio == NULL))
645 goto fail_put_bio;
646
647 biolist[nbio++] = bio;
648 bio->bi_bdev = preq.bdev;
649 bio->bi_private = pending_req;
650 bio->bi_end_io = end_block_io_op;
651 }
652
653 /*
654 * We set it one so that the last submit_bio does not have to call
655 * atomic_inc.
656 */
657 atomic_set(&pending_req->pendcnt, nbio);
658
659 /* Get a reference count for the disk queue and start sending I/O */
660 blk_start_plug(&plug);
661
662 for (i = 0; i < nbio; i++)
663 submit_bio(operation, biolist[i]);
664
665 /* Let the I/Os go.. */
666 blk_finish_plug(&plug);
667
668 if (operation == READ)
669 blkif->st_rd_sect += preq.nr_sects;
670 else if (operation == WRITE || operation == WRITE_FLUSH)
671 blkif->st_wr_sect += preq.nr_sects;
672
673 return 0;
674
675 fail_flush:
676 xen_blkbk_unmap(pending_req);
677 fail_response:
678 /* Haven't submitted any bio's yet. */
679 make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
680 free_req(pending_req);
681 msleep(1); /* back off a bit */
682 return -EIO;
683
684 fail_put_bio:
685 for (i = 0; i < nbio; i++)
686 bio_put(biolist[i]);
687 __end_block_io_op(pending_req, -EINVAL);
688 msleep(1); /* back off a bit */
689 return -EIO;
690}
691
692
693
694/*
695 * Put a response on the ring on how the operation fared.
696 */
697static void make_response(struct xen_blkif *blkif, u64 id,
698 unsigned short op, int st)
699{
700 struct blkif_response resp;
701 unsigned long flags;
702 union blkif_back_rings *blk_rings = &blkif->blk_rings;
703 int more_to_do = 0;
704 int notify;
705
706 resp.id = id;
707 resp.operation = op;
708 resp.status = st;
709
710 spin_lock_irqsave(&blkif->blk_ring_lock, flags);
711 /* Place on the response ring for the relevant domain. */
712 switch (blkif->blk_protocol) {
713 case BLKIF_PROTOCOL_NATIVE:
714 memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
715 &resp, sizeof(resp));
716 break;
717 case BLKIF_PROTOCOL_X86_32:
718 memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
719 &resp, sizeof(resp));
720 break;
721 case BLKIF_PROTOCOL_X86_64:
722 memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
723 &resp, sizeof(resp));
724 break;
725 default:
726 BUG();
727 }
728 blk_rings->common.rsp_prod_pvt++;
729 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
730 if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
731 /*
732 * Tail check for pending requests. Allows frontend to avoid
733 * notifications if requests are already in flight (lower
734 * overheads and promotes batching).
735 */
736 RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
737
738 } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
739 more_to_do = 1;
740 }
741
742 spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
743
744 if (more_to_do)
745 blkif_notify_work(blkif);
746 if (notify)
747 notify_remote_via_irq(blkif->irq);
748}
749
750static int __init xen_blkif_init(void)
751{
752 int i, mmap_pages;
753 int rc = 0;
754
755 if (!xen_pv_domain())
756 return -ENODEV;
757
758 blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
759 if (!blkbk) {
760 pr_alert(DRV_PFX "%s: out of memory!\n", __func__);
761 return -ENOMEM;
762 }
763
764 mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
765
766 blkbk->pending_reqs = kmalloc(sizeof(blkbk->pending_reqs[0]) *
767 xen_blkif_reqs, GFP_KERNEL);
768 blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) *
769 mmap_pages, GFP_KERNEL);
770 blkbk->pending_pages = kzalloc(sizeof(blkbk->pending_pages[0]) *
771 mmap_pages, GFP_KERNEL);
772
773 if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
774 !blkbk->pending_pages) {
775 rc = -ENOMEM;
776 goto out_of_memory;
777 }
778
779 for (i = 0; i < mmap_pages; i++) {
780 blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
781 blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
782 if (blkbk->pending_pages[i] == NULL) {
783 rc = -ENOMEM;
784 goto out_of_memory;
785 }
786 }
787 rc = xen_blkif_interface_init();
788 if (rc)
789 goto failed_init;
790
791 memset(blkbk->pending_reqs, 0, sizeof(blkbk->pending_reqs));
792
793 INIT_LIST_HEAD(&blkbk->pending_free);
794 spin_lock_init(&blkbk->pending_free_lock);
795 init_waitqueue_head(&blkbk->pending_free_wq);
796
797 for (i = 0; i < xen_blkif_reqs; i++)
798 list_add_tail(&blkbk->pending_reqs[i].free_list,
799 &blkbk->pending_free);
800
801 rc = xen_blkif_xenbus_init();
802 if (rc)
803 goto failed_init;
804
805 return 0;
806
807 out_of_memory:
808 pr_alert(DRV_PFX "%s: out of memory\n", __func__);
809 failed_init:
810 kfree(blkbk->pending_reqs);
811 kfree(blkbk->pending_grant_handles);
812 for (i = 0; i < mmap_pages; i++) {
813 if (blkbk->pending_pages[i])
814 __free_page(blkbk->pending_pages[i]);
815 }
816 kfree(blkbk->pending_pages);
817 kfree(blkbk);
818 blkbk = NULL;
819 return rc;
820}
821
822module_init(xen_blkif_init);
823
824MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
new file mode 100644
index 000000000000..9e40b283a468
--- /dev/null
+++ b/drivers/block/xen-blkback/common.h
@@ -0,0 +1,233 @@
1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License version 2
4 * as published by the Free Software Foundation; or, when distributed
5 * separately from the Linux kernel or incorporated into other
6 * software packages, subject to the following license:
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this source file (the "Software"), to deal in the Software without
10 * restriction, including without limitation the rights to use, copy, modify,
11 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
12 * and to permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * IN THE SOFTWARE.
25 */
26
27#ifndef __XEN_BLKIF__BACKEND__COMMON_H__
28#define __XEN_BLKIF__BACKEND__COMMON_H__
29
30#include <linux/version.h>
31#include <linux/module.h>
32#include <linux/interrupt.h>
33#include <linux/slab.h>
34#include <linux/blkdev.h>
35#include <linux/vmalloc.h>
36#include <linux/wait.h>
37#include <linux/io.h>
38#include <asm/setup.h>
39#include <asm/pgalloc.h>
40#include <asm/hypervisor.h>
41#include <xen/grant_table.h>
42#include <xen/xenbus.h>
43#include <xen/interface/io/ring.h>
44#include <xen/interface/io/blkif.h>
45#include <xen/interface/io/protocols.h>
46
47#define DRV_PFX "xen-blkback:"
48#define DPRINTK(fmt, args...) \
49 pr_debug(DRV_PFX "(%s:%d) " fmt ".\n", \
50 __func__, __LINE__, ##args)
51
52
53/* Not a real protocol. Used to generate ring structs which contain
54 * the elements common to all protocols only. This way we get a
55 * compiler-checkable way to use common struct elements, so we can
56 * avoid using switch(protocol) in a number of places. */
57struct blkif_common_request {
58 char dummy;
59};
60struct blkif_common_response {
61 char dummy;
62};
63
/*
 * i386 protocol version.
 * #pragma pack(4) reproduces the 32-bit guest ABI, in which the 64-bit
 * 'id' field is only 4-byte aligned.
 */
#pragma pack(push, 4)
struct blkif_x86_32_request {
        uint8_t        operation;    /* BLKIF_OP_??? */
        uint8_t        nr_segments;  /* number of segments */
        blkif_vdev_t   handle;       /* only for read/write requests */
        uint64_t       id;           /* private guest value, echoed in resp */
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
        struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
struct blkif_x86_32_response {
        uint64_t        id;         /* copied from request */
        uint8_t         operation;  /* copied from request */
        int16_t         status;     /* BLKIF_RSP_??? */
};
#pragma pack(pop)

/*
 * x86_64 protocol version.  Here 'id' carries an explicit 8-byte
 * alignment attribute, matching the 64-bit guest ABI.
 */
struct blkif_x86_64_request {
        uint8_t        operation;    /* BLKIF_OP_??? */
        uint8_t        nr_segments;  /* number of segments */
        blkif_vdev_t   handle;       /* only for read/write requests */
        uint64_t       __attribute__((__aligned__(8))) id;
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
        struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
struct blkif_x86_64_response {
        uint64_t       __attribute__((__aligned__(8))) id;
        uint8_t         operation;  /* copied from request */
        int16_t         status;     /* BLKIF_RSP_??? */
};

/* One back-ring type per wire format (overlaid in union blkif_back_rings). */
DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
                  struct blkif_common_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
                  struct blkif_x86_32_response);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
                  struct blkif_x86_64_response);

/*
 * All ring flavours overlaid; 'common' gives access to the fields shared
 * by every format (e.g. the sring pointer checked during teardown).
 */
union blkif_back_rings {
        struct blkif_back_ring        native;
        struct blkif_common_back_ring common;
        struct blkif_x86_32_back_ring x86_32;
        struct blkif_x86_64_back_ring x86_64;
};

/* Wire format negotiated with the frontend (selected in connect_ring()). */
enum blkif_protocol {
        BLKIF_PROTOCOL_NATIVE = 1,
        BLKIF_PROTOCOL_X86_32 = 2,
        BLKIF_PROTOCOL_X86_64 = 3,
};
115
/* Backend-side description of one exported virtual block device (vbd). */
struct xen_vbd {
        /* What the domain refers to this vbd as. */
        blkif_vdev_t            handle;
        /* Non-zero -> read-only */
        unsigned char           readonly;
        /* VDISK_xxx */
        unsigned char           type;
        /* phys device that this vbd maps to. */
        u32                     pdevice;
        struct block_device     *bdev;
        /* Cached size parameter. */
        sector_t                size;
        /* True when the backing queue advertises flush support
         * (set from q->flush_flags in xen_vbd_create()). */
        bool                    flush_support;
};
130
131struct backend_info;
132
/* One instance per connected frontend: ring, vbd, kthread, statistics. */
struct xen_blkif {
        /* Unique identifier for this interface. */
        domid_t                 domid;
        unsigned int            handle;
        /* Physical parameters of the comms window. */
        unsigned int            irq;
        /* Comms information. */
        enum blkif_protocol     blk_protocol;
        union blkif_back_rings  blk_rings;
        struct vm_struct        *blk_ring_area;
        /* The VBD attached to this interface. */
        struct xen_vbd          vbd;
        /* Back pointer to the backend_info. */
        struct backend_info     *be;
        /* Private fields. */
        spinlock_t              blk_ring_lock;
        atomic_t                refcnt;

        wait_queue_head_t       wq;
        /* One thread per one blkif. */
        struct task_struct      *xenblkd;
        unsigned int            waiting_reqs;

        /* statistics (exported read-only via the sysfs "statistics" group) */
        unsigned long           st_print;
        int                     st_rd_req;
        int                     st_wr_req;
        int                     st_oo_req;
        int                     st_f_req;
        int                     st_rd_sect;
        int                     st_wr_sect;

        /* Woken by the final xen_blkif_put(); see xen_blkif_disconnect(). */
        wait_queue_head_t       waiting_to_free;

        /* Grant bookkeeping for the mapped shared ring page. */
        grant_handle_t          shmem_handle;
        grant_ref_t             shmem_ref;
};
170
171
/* Size of the vbd in sectors: the partition length when bdev is a
 * partition, otherwise the whole-disk capacity. */
#define vbd_sz(_v)      ((_v)->bdev->bd_part ? \
                         (_v)->bdev->bd_part->nr_sects : \
                         get_capacity((_v)->bdev->bd_disk))

/* Reference counting: the final put wakes whoever is sleeping on
 * waiting_to_free (see xen_blkif_disconnect()). */
#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b)                               \
        do {                                            \
                if (atomic_dec_and_test(&(_b)->refcnt)) \
                        wake_up(&(_b)->waiting_to_free);\
        } while (0)

/* A physical sector span on a backing block device. */
struct phys_req {
        unsigned short          dev;
        unsigned short          nr_sects;
        struct block_device     *bdev;
        blkif_sector_t          sector_number;
};

/* One-time creation of the blkif slab cache. */
int xen_blkif_interface_init(void);

/* Register the xenbus backend driver. */
int xen_blkif_xenbus_init(void);

irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);

/* Advertise (or retract) "feature-flush-cache" in xenstore. */
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
                              struct backend_info *be, int state);

struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
200
/*
 * Copy a 32-bit-ABI request from the shared ring into the native
 * 'struct blkif_request' layout.
 *
 * The header fields are copied first; barrier() then prevents the
 * compiler from re-reading nr_segments, and the loop bound is clamped
 * to the already-copied dst->nr_segments (capped at
 * BLKIF_MAX_SEGMENTS_PER_REQUEST) before the per-segment copy.
 * NOTE(review): src presumably lives in a page shared with an untrusted
 * frontend, hence the snapshot-then-clamp discipline -- confirm at the
 * call sites.
 */
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
                                        struct blkif_x86_32_request *src)
{
        int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
        dst->operation = src->operation;
        dst->nr_segments = src->nr_segments;
        dst->handle = src->handle;
        dst->id = src->id;
        dst->u.rw.sector_number = src->sector_number;
        barrier();
        if (n > dst->nr_segments)
                n = dst->nr_segments;
        for (i = 0; i < n; i++)
                dst->u.rw.seg[i] = src->seg[i];
}
216
/*
 * 64-bit counterpart of blkif_get_x86_32_req(): same copy-header,
 * barrier(), clamp-segment-count, copy-segments sequence.
 */
static inline void blkif_get_x86_64_req(struct blkif_request *dst,
                                        struct blkif_x86_64_request *src)
{
        int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
        dst->operation = src->operation;
        dst->nr_segments = src->nr_segments;
        dst->handle = src->handle;
        dst->id = src->id;
        dst->u.rw.sector_number = src->sector_number;
        barrier();
        if (n > dst->nr_segments)
                n = dst->nr_segments;
        for (i = 0; i < n; i++)
                dst->u.rw.seg[i] = src->seg[i];
}
232
233#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
new file mode 100644
index 000000000000..34570823355b
--- /dev/null
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -0,0 +1,768 @@
1/* Xenbus code for blkif backend
2 Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
3 Copyright (C) 2005 XenSource Ltd
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15*/
16
17#include <stdarg.h>
18#include <linux/module.h>
19#include <linux/kthread.h>
20#include <xen/events.h>
21#include <xen/grant_table.h>
22#include "common.h"
23
/* Per-xenbus-device state tying the device node to its blkif. */
struct backend_info {
        struct xenbus_device    *dev;
        struct xen_blkif        *blkif;
        /* Watch on <nodename>/physical-device, armed in xen_blkbk_probe(). */
        struct xenbus_watch     backend_watch;
        unsigned                major;
        unsigned                minor;
        char                    *mode;
};

/* Slab cache for struct xen_blkif (created in xen_blkif_interface_init()). */
static struct kmem_cache *xen_blkif_cachep;

/* Forward declarations for the xenbus callbacks defined below. */
static void connect(struct backend_info *);
static int connect_ring(struct backend_info *);
static void backend_changed(struct xenbus_watch *, const char **,
                            unsigned int);
38
/* Accessor for the xenbus device behind a backend (declared in common.h). */
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be)
{
        return be->dev;
}
43
44static int blkback_name(struct xen_blkif *blkif, char *buf)
45{
46 char *devpath, *devname;
47 struct xenbus_device *dev = blkif->be->dev;
48
49 devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
50 if (IS_ERR(devpath))
51 return PTR_ERR(devpath);
52
53 devname = strstr(devpath, "/dev/");
54 if (devname != NULL)
55 devname += strlen("/dev/");
56 else
57 devname = devpath;
58
59 snprintf(buf, TASK_COMM_LEN, "blkback.%d.%s", blkif->domid, devname);
60 kfree(devpath);
61
62 return 0;
63}
64
/*
 * Try to move the backend to Connected and start its I/O kthread.
 * Idempotent: returns early when the ring is not mapped yet (no irq),
 * the vbd is not opened yet, or the device is already Connected.
 */
static void xen_update_blkif_status(struct xen_blkif *blkif)
{
        int err;
        char name[TASK_COMM_LEN];

        /* Not ready to connect? */
        if (!blkif->irq || !blkif->vbd.bdev)
                return;

        /* Already connected? */
        if (blkif->be->dev->state == XenbusStateConnected)
                return;

        /* Attempt to connect: exit if we fail to. */
        connect(blkif->be);
        if (blkif->be->dev->state != XenbusStateConnected)
                return;

        err = blkback_name(blkif, name);
        if (err) {
                xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
                return;
        }

        /* Flush and drop any stale page-cache view of the device before
         * the guest starts issuing I/O to it. */
        err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
        if (err) {
                xenbus_dev_error(blkif->be->dev, err, "block flush");
                return;
        }
        invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);

        blkif->xenblkd = kthread_run(xen_blkif_schedule, blkif, name);
        if (IS_ERR(blkif->xenblkd)) {
                err = PTR_ERR(blkif->xenblkd);
                blkif->xenblkd = NULL;
                xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
        }
}
103
104static struct xen_blkif *xen_blkif_alloc(domid_t domid)
105{
106 struct xen_blkif *blkif;
107
108 blkif = kmem_cache_alloc(xen_blkif_cachep, GFP_KERNEL);
109 if (!blkif)
110 return ERR_PTR(-ENOMEM);
111
112 memset(blkif, 0, sizeof(*blkif));
113 blkif->domid = domid;
114 spin_lock_init(&blkif->blk_ring_lock);
115 atomic_set(&blkif->refcnt, 1);
116 init_waitqueue_head(&blkif->wq);
117 blkif->st_print = jiffies;
118 init_waitqueue_head(&blkif->waiting_to_free);
119
120 return blkif;
121}
122
/*
 * Map the frontend's shared ring page (grant reference 'shared_page')
 * at blkif->blk_ring_area->addr.  On success the grant ref and handle
 * are remembered so unmap_frontend_page() can undo the mapping.
 * Returns 0, or the raw (non-errno) grant status code on failure.
 */
static int map_frontend_page(struct xen_blkif *blkif, unsigned long shared_page)
{
        struct gnttab_map_grant_ref op;

        gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
                          GNTMAP_host_map, shared_page, blkif->domid);

        /* A failed hypercall (as opposed to a failed op) is fatal. */
        if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
                BUG();

        if (op.status) {
                DPRINTK("Grant table operation failure !\n");
                return op.status;
        }

        blkif->shmem_ref = shared_page;
        blkif->shmem_handle = op.handle;

        return 0;
}
143
/* Undo map_frontend_page(): release the grant mapping of the ring page. */
static void unmap_frontend_page(struct xen_blkif *blkif)
{
        struct gnttab_unmap_grant_ref op;

        gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
                            GNTMAP_host_map, blkif->shmem_handle);

        /* Only a failed hypercall is fatal here. */
        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
                BUG();
}
154
/*
 * Map the shared ring page and bind the interdomain event channel.
 * Idempotent: returns 0 immediately if an irq is already bound.  On any
 * failure everything acquired so far is released again before returning
 * a negative errno (or a grant status from map_frontend_page()).
 */
static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
                         unsigned int evtchn)
{
        int err;

        /* Already connected through? */
        if (blkif->irq)
                return 0;

        blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE);
        if (!blkif->blk_ring_area)
                return -ENOMEM;

        err = map_frontend_page(blkif, shared_page);
        if (err) {
                free_vm_area(blkif->blk_ring_area);
                return err;
        }

        /* Initialise the back ring in whichever wire format the frontend
         * negotiated (see connect_ring()). */
        switch (blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
        {
                struct blkif_sring *sring;
                sring = (struct blkif_sring *)blkif->blk_ring_area->addr;
                BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
                break;
        }
        case BLKIF_PROTOCOL_X86_32:
        {
                struct blkif_x86_32_sring *sring_x86_32;
                sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring_area->addr;
                BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
                break;
        }
        case BLKIF_PROTOCOL_X86_64:
        {
                struct blkif_x86_64_sring *sring_x86_64;
                sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring_area->addr;
                BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
                break;
        }
        default:
                BUG();
        }

        err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
                                                    xen_blkif_be_int, 0,
                                                    "blkif-backend", blkif);
        if (err < 0) {
                unmap_frontend_page(blkif);
                free_vm_area(blkif->blk_ring_area);
                blkif->blk_rings.common.sring = NULL;
                return err;
        }
        /* bind_* returns the allocated irq number on success. */
        blkif->irq = err;

        return 0;
}
213
/*
 * Tear down the connection to the frontend: stop the I/O thread, wait
 * for all other reference holders to drop theirs, then release the irq
 * and the mapped ring.  Idempotent: each step checks its own state.
 */
static void xen_blkif_disconnect(struct xen_blkif *blkif)
{
        if (blkif->xenblkd) {
                kthread_stop(blkif->xenblkd);
                blkif->xenblkd = NULL;
        }

        /* Drop our own reference so the count can reach zero, sleep until
         * every other holder has called xen_blkif_put() (which wakes
         * waiting_to_free), then re-take it for the rest of teardown. */
        atomic_dec(&blkif->refcnt);
        wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
        atomic_inc(&blkif->refcnt);

        if (blkif->irq) {
                unbind_from_irqhandler(blkif->irq, blkif);
                blkif->irq = 0;
        }

        /* sring is only non-NULL while the ring page is mapped. */
        if (blkif->blk_rings.common.sring) {
                unmap_frontend_page(blkif);
                free_vm_area(blkif->blk_ring_area);
                blkif->blk_rings.common.sring = NULL;
        }
}
236
237void xen_blkif_free(struct xen_blkif *blkif)
238{
239 if (!atomic_dec_and_test(&blkif->refcnt))
240 BUG();
241 kmem_cache_free(xen_blkif_cachep, blkif);
242}
243
244int __init xen_blkif_interface_init(void)
245{
246 xen_blkif_cachep = kmem_cache_create("blkif_cache",
247 sizeof(struct xen_blkif),
248 0, 0, NULL);
249 if (!xen_blkif_cachep)
250 return -ENOMEM;
251
252 return 0;
253}
254
255/*
256 * sysfs interface for VBD I/O requests
257 */
258
/*
 * Generates one read-only sysfs attribute whose show method formats a
 * field reachable from the device's backend_info.
 */
#define VBD_SHOW(name, format, args...)                                 \
        static ssize_t show_##name(struct device *_dev,                 \
                                   struct device_attribute *attr,       \
                                   char *buf)                           \
        {                                                               \
                struct xenbus_device *dev = to_xenbus_device(_dev);     \
                struct backend_info *be = dev_get_drvdata(&dev->dev);   \
                                                                        \
                return sprintf(buf, format, ##args);                    \
        }                                                               \
        static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

/* Per-vbd request statistics, mirrored from struct xen_blkif. */
VBD_SHOW(oo_req,  "%d\n", be->blkif->st_oo_req);
VBD_SHOW(rd_req,  "%d\n", be->blkif->st_rd_req);
VBD_SHOW(wr_req,  "%d\n", be->blkif->st_wr_req);
VBD_SHOW(f_req,  "%d\n", be->blkif->st_f_req);
VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);

static struct attribute *xen_vbdstat_attrs[] = {
        &dev_attr_oo_req.attr,
        &dev_attr_rd_req.attr,
        &dev_attr_wr_req.attr,
        &dev_attr_f_req.attr,
        &dev_attr_rd_sect.attr,
        &dev_attr_wr_sect.attr,
        NULL
};

/* The statistics appear grouped under <device>/statistics/ in sysfs. */
static struct attribute_group xen_vbdstat_group = {
        .name = "statistics",
        .attrs = xen_vbdstat_attrs,
};

VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
VBD_SHOW(mode, "%s\n", be->mode);
295
296int xenvbd_sysfs_addif(struct xenbus_device *dev)
297{
298 int error;
299
300 error = device_create_file(&dev->dev, &dev_attr_physical_device);
301 if (error)
302 goto fail1;
303
304 error = device_create_file(&dev->dev, &dev_attr_mode);
305 if (error)
306 goto fail2;
307
308 error = sysfs_create_group(&dev->dev.kobj, &xen_vbdstat_group);
309 if (error)
310 goto fail3;
311
312 return 0;
313
314fail3: sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
315fail2: device_remove_file(&dev->dev, &dev_attr_mode);
316fail1: device_remove_file(&dev->dev, &dev_attr_physical_device);
317 return error;
318}
319
/* Remove everything xenvbd_sysfs_addif() created, in reverse order. */
void xenvbd_sysfs_delif(struct xenbus_device *dev)
{
        sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
        device_remove_file(&dev->dev, &dev_attr_mode);
        device_remove_file(&dev->dev, &dev_attr_physical_device);
}
326
327
328static void xen_vbd_free(struct xen_vbd *vbd)
329{
330 if (vbd->bdev)
331 blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE);
332 vbd->bdev = NULL;
333}
334
/*
 * Open the physical device MKDEV(major, minor) and fill in blkif->vbd.
 * 'readonly' selects FMODE_READ vs FMODE_WRITE for the open; 'cdrom'
 * (or the disk's GENHD_FL_CD flag) marks the vbd as VDISK_CDROM.
 * Returns 0 on success, -ENOENT when the device cannot be opened or has
 * no gendisk.
 */
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
                          unsigned major, unsigned minor, int readonly,
                          int cdrom)
{
        struct xen_vbd *vbd;
        struct block_device *bdev;
        struct request_queue *q;

        vbd = &blkif->vbd;
        vbd->handle   = handle;
        vbd->readonly = readonly;
        vbd->type     = 0;

        vbd->pdevice  = MKDEV(major, minor);

        bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
                                 FMODE_READ : FMODE_WRITE, NULL);

        if (IS_ERR(bdev)) {
                DPRINTK("xen_vbd_create: device %08x could not be opened.\n",
                        vbd->pdevice);
                return -ENOENT;
        }

        vbd->bdev = bdev;
        vbd->size = vbd_sz(vbd);

        if (vbd->bdev->bd_disk == NULL) {
                DPRINTK("xen_vbd_create: device %08x doesn't exist.\n",
                        vbd->pdevice);
                xen_vbd_free(vbd);
                return -ENOENT;
        }

        if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
                vbd->type |= VDISK_CDROM;
        if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
                vbd->type |= VDISK_REMOVABLE;

        /* Remember whether the queue can service flush requests; this is
         * later advertised as "feature-flush-cache" (see connect()). */
        q = bdev_get_queue(bdev);
        if (q && q->flush_flags)
                vbd->flush_support = true;

        DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
                handle, blkif->domid);
        return 0;
}
/*
 * xenbus removal callback: tear down sysfs entries, the xenstore watch
 * and the blkif (disconnect, close vbd, free), then release the
 * backend_info.  Copes with partially-initialised state, so it is also
 * used as the failure path of xen_blkbk_probe().
 */
static int xen_blkbk_remove(struct xenbus_device *dev)
{
        struct backend_info *be = dev_get_drvdata(&dev->dev);

        DPRINTK("");

        /* Sysfs nodes exist only once a physical device has been seen. */
        if (be->major || be->minor)
                xenvbd_sysfs_delif(dev);

        if (be->backend_watch.node) {
                unregister_xenbus_watch(&be->backend_watch);
                kfree(be->backend_watch.node);
                be->backend_watch.node = NULL;
        }

        if (be->blkif) {
                xen_blkif_disconnect(be->blkif);
                xen_vbd_free(&be->blkif->vbd);
                xen_blkif_free(be->blkif);
                be->blkif = NULL;
        }

        kfree(be);
        dev_set_drvdata(&dev->dev, NULL);
        return 0;
}
408
409int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
410 struct backend_info *be, int state)
411{
412 struct xenbus_device *dev = be->dev;
413 int err;
414
415 err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
416 "%d", state);
417 if (err)
418 xenbus_dev_fatal(dev, err, "writing feature-flush-cache");
419
420 return err;
421}
422
423/*
424 * Entry point to this code when a new device is created. Allocate the basic
425 * structures, and watch the store waiting for the hotplug scripts to tell us
426 * the device's physical major and minor numbers. Switch to InitWait.
427 */
static int xen_blkbk_probe(struct xenbus_device *dev,
                           const struct xenbus_device_id *id)
{
        int err;
        struct backend_info *be = kzalloc(sizeof(struct backend_info),
                                          GFP_KERNEL);
        if (!be) {
                xenbus_dev_fatal(dev, -ENOMEM,
                                 "allocating backend structure");
                return -ENOMEM;
        }
        be->dev = dev;
        dev_set_drvdata(&dev->dev, be);

        be->blkif = xen_blkif_alloc(dev->otherend_id);
        if (IS_ERR(be->blkif)) {
                err = PTR_ERR(be->blkif);
                be->blkif = NULL;
                xenbus_dev_fatal(dev, err, "creating block interface");
                goto fail;
        }

        /* setup back pointer */
        be->blkif->be = be;

        /* The watch fires backend_changed() once immediately and again
         * whenever the hotplug scripts write physical-device. */
        err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed,
                                   "%s/%s", dev->nodename, "physical-device");
        if (err)
                goto fail;

        err = xenbus_switch_state(dev, XenbusStateInitWait);
        if (err)
                goto fail;

        return 0;

fail:
        DPRINTK("failed");
        /* xen_blkbk_remove() handles partially-initialised state. */
        xen_blkbk_remove(dev);
        return err;
}
469
470
471/*
472 * Callback received when the hotplug scripts have placed the physical-device
473 * node. Read it and the mode node, and create a vbd. If the frontend is
474 * ready, connect.
475 */
476static void backend_changed(struct xenbus_watch *watch,
477 const char **vec, unsigned int len)
478{
479 int err;
480 unsigned major;
481 unsigned minor;
482 struct backend_info *be
483 = container_of(watch, struct backend_info, backend_watch);
484 struct xenbus_device *dev = be->dev;
485 int cdrom = 0;
486 char *device_type;
487
488 DPRINTK("");
489
490 err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
491 &major, &minor);
492 if (XENBUS_EXIST_ERR(err)) {
493 /*
494 * Since this watch will fire once immediately after it is
495 * registered, we expect this. Ignore it, and wait for the
496 * hotplug scripts.
497 */
498 return;
499 }
500 if (err != 2) {
501 xenbus_dev_fatal(dev, err, "reading physical-device");
502 return;
503 }
504
505 if ((be->major || be->minor) &&
506 ((be->major != major) || (be->minor != minor))) {
507 pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
508 be->major, be->minor, major, minor);
509 return;
510 }
511
512 be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
513 if (IS_ERR(be->mode)) {
514 err = PTR_ERR(be->mode);
515 be->mode = NULL;
516 xenbus_dev_fatal(dev, err, "reading mode");
517 return;
518 }
519
520 device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
521 if (!IS_ERR(device_type)) {
522 cdrom = strcmp(device_type, "cdrom") == 0;
523 kfree(device_type);
524 }
525
526 if (be->major == 0 && be->minor == 0) {
527 /* Front end dir is a number, which is used as the handle. */
528
529 char *p = strrchr(dev->otherend, '/') + 1;
530 long handle;
531 err = strict_strtoul(p, 0, &handle);
532 if (err)
533 return;
534
535 be->major = major;
536 be->minor = minor;
537
538 err = xen_vbd_create(be->blkif, handle, major, minor,
539 (NULL == strchr(be->mode, 'w')), cdrom);
540 if (err) {
541 be->major = 0;
542 be->minor = 0;
543 xenbus_dev_fatal(dev, err, "creating vbd structure");
544 return;
545 }
546
547 err = xenvbd_sysfs_addif(dev);
548 if (err) {
549 xen_vbd_free(&be->blkif->vbd);
550 be->major = 0;
551 be->minor = 0;
552 xenbus_dev_fatal(dev, err, "creating sysfs entries");
553 return;
554 }
555
556 /* We're potentially connected now */
557 xen_update_blkif_status(be->blkif);
558 }
559}
560
561
562/*
563 * Callback received when the frontend's state changes.
564 */
static void frontend_changed(struct xenbus_device *dev,
                             enum xenbus_state frontend_state)
{
        struct backend_info *be = dev_get_drvdata(&dev->dev);
        int err;

        DPRINTK("%s", xenbus_strstate(frontend_state));

        switch (frontend_state) {
        case XenbusStateInitialising:
                if (dev->state == XenbusStateClosed) {
                        pr_info(DRV_PFX "%s: prepare for reconnect\n",
                                dev->nodename);
                        xenbus_switch_state(dev, XenbusStateInitWait);
                }
                break;

        case XenbusStateInitialised:
        case XenbusStateConnected:
                /*
                 * Ensure we connect even when two watches fire in
                 * close succession and we miss the intermediate value
                 * of frontend_state.
                 */
                if (dev->state == XenbusStateConnected)
                        break;

                /*
                 * Enforce precondition before potential leak point.
                 * blkif_disconnect() is idempotent.
                 */
                xen_blkif_disconnect(be->blkif);

                err = connect_ring(be);
                if (err)
                        break;
                xen_update_blkif_status(be->blkif);
                break;

        case XenbusStateClosing:
                xen_blkif_disconnect(be->blkif);
                xenbus_switch_state(dev, XenbusStateClosing);
                break;

        case XenbusStateClosed:
                xenbus_switch_state(dev, XenbusStateClosed);
                if (xenbus_dev_is_online(dev))
                        break;
                /* fall through if not online */
        case XenbusStateUnknown:
                /* implies blkif_disconnect() via blkback_remove() */
                device_unregister(&dev->dev);
                break;

        default:
                xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
                                 frontend_state);
                break;
        }
}
625
626
627/* ** Connection ** */
628
629
630/*
631 * Write the physical details regarding the block device to the store, and
632 * switch to Connected state.
633 */
static void connect(struct backend_info *be)
{
        struct xenbus_transaction xbt;
        int err;
        struct xenbus_device *dev = be->dev;

        DPRINTK("%s", dev->otherend);

        /* Supply the information about the device the frontend needs */
again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                xenbus_dev_fatal(dev, err, "starting transaction");
                return;
        }

        err = xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
        if (err)
                goto abort;

        err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
                            (unsigned long long)vbd_sz(&be->blkif->vbd));
        if (err) {
                xenbus_dev_fatal(dev, err, "writing %s/sectors",
                                 dev->nodename);
                goto abort;
        }

        /* FIXME: use a typename instead */
        err = xenbus_printf(xbt, dev->nodename, "info", "%u",
                            be->blkif->vbd.type |
                            (be->blkif->vbd.readonly ? VDISK_READONLY : 0));
        if (err) {
                xenbus_dev_fatal(dev, err, "writing %s/info",
                                 dev->nodename);
                goto abort;
        }
        err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
                            (unsigned long)
                            bdev_logical_block_size(be->blkif->vbd.bdev));
        if (err) {
                xenbus_dev_fatal(dev, err, "writing %s/sector-size",
                                 dev->nodename);
                goto abort;
        }

        /* -EAGAIN means the transaction raced with another writer and the
         * whole sequence must be retried from the top. */
        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
                goto again;
        if (err)
                xenbus_dev_fatal(dev, err, "ending transaction");

        err = xenbus_switch_state(dev, XenbusStateConnected);
        if (err)
                xenbus_dev_fatal(dev, err, "switching to Connected state",
                                 dev->nodename);

        return;
 abort:
        /* Second argument 1 == abort the transaction. */
        xenbus_transaction_end(xbt, 1);
}
695
696
697static int connect_ring(struct backend_info *be)
698{
699 struct xenbus_device *dev = be->dev;
700 unsigned long ring_ref;
701 unsigned int evtchn;
702 char protocol[64] = "";
703 int err;
704
705 DPRINTK("%s", dev->otherend);
706
707 err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu",
708 &ring_ref, "event-channel", "%u", &evtchn, NULL);
709 if (err) {
710 xenbus_dev_fatal(dev, err,
711 "reading %s/ring-ref and event-channel",
712 dev->otherend);
713 return err;
714 }
715
716 be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
717 err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
718 "%63s", protocol, NULL);
719 if (err)
720 strcpy(protocol, "unspecified, assuming native");
721 else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
722 be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
723 else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
724 be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
725 else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
726 be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
727 else {
728 xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
729 return -1;
730 }
731 pr_info(DRV_PFX "ring-ref %ld, event-channel %d, protocol %d (%s)\n",
732 ring_ref, evtchn, be->blkif->blk_protocol, protocol);
733
734 /* Map the shared frame, irq etc. */
735 err = xen_blkif_map(be->blkif, ring_ref, evtchn);
736 if (err) {
737 xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
738 ring_ref, evtchn);
739 return err;
740 }
741
742 return 0;
743}
744
745
746/* ** Driver Registration ** */
747
748
/* Matched against the "vbd" device class in xenstore. */
static const struct xenbus_device_id xen_blkbk_ids[] = {
        { "vbd" },
        { "" }
};


static struct xenbus_driver xen_blkbk = {
        .name = "vbd",
        .owner = THIS_MODULE,
        .ids = xen_blkbk_ids,
        .probe = xen_blkbk_probe,
        .remove = xen_blkbk_remove,
        /* Called when the frontend's xenbus state changes. */
        .otherend_changed = frontend_changed
};


/* Called from the module init path to register the backend driver. */
int xen_blkif_xenbus_init(void)
{
        return xenbus_register_backend(&xen_blkbk);
}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 9cb8668ff5f4..b536a9cef917 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -97,6 +97,7 @@ struct blkfront_info
97 struct blk_shadow shadow[BLK_RING_SIZE]; 97 struct blk_shadow shadow[BLK_RING_SIZE];
98 unsigned long shadow_free; 98 unsigned long shadow_free;
99 unsigned int feature_flush; 99 unsigned int feature_flush;
100 unsigned int flush_op;
100 int is_ready; 101 int is_ready;
101}; 102};
102 103
@@ -250,8 +251,7 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
250 251
251/* 252/*
252 * Generate a Xen blkfront IO request from a blk layer request. Reads 253 * Generate a Xen blkfront IO request from a blk layer request. Reads
253 * and writes are handled as expected. Since we lack a loose flush 254 * and writes are handled as expected.
254 * request, we map flushes into a full ordered barrier.
255 * 255 *
256 * @req: a request struct 256 * @req: a request struct
257 */ 257 */
@@ -293,14 +293,13 @@ static int blkif_queue_request(struct request *req)
293 293
294 if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) { 294 if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
295 /* 295 /*
296 * Ideally we could just do an unordered 296 * Ideally we can do an unordered flush-to-disk. In case the
297 * flush-to-disk, but all we have is a full write 297 * backend onlysupports barriers, use that. A barrier request
298 * barrier at the moment. However, a barrier write is
299 * a superset of FUA, so we can implement it the same 298 * a superset of FUA, so we can implement it the same
300 * way. (It's also a FLUSH+FUA, since it is 299 * way. (It's also a FLUSH+FUA, since it is
301 * guaranteed ordered WRT previous writes.) 300 * guaranteed ordered WRT previous writes.)
302 */ 301 */
303 ring_req->operation = BLKIF_OP_WRITE_BARRIER; 302 ring_req->operation = info->flush_op;
304 } 303 }
305 304
306 ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); 305 ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
@@ -433,8 +432,11 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
433static void xlvbd_flush(struct blkfront_info *info) 432static void xlvbd_flush(struct blkfront_info *info)
434{ 433{
435 blk_queue_flush(info->rq, info->feature_flush); 434 blk_queue_flush(info->rq, info->feature_flush);
436 printk(KERN_INFO "blkfront: %s: barriers %s\n", 435 printk(KERN_INFO "blkfront: %s: %s: %s\n",
437 info->gd->disk_name, 436 info->gd->disk_name,
437 info->flush_op == BLKIF_OP_WRITE_BARRIER ?
438 "barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ?
439 "flush diskcache" : "barrier or flush"),
438 info->feature_flush ? "enabled" : "disabled"); 440 info->feature_flush ? "enabled" : "disabled");
439} 441}
440 442
@@ -720,15 +722,20 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
720 722
721 error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; 723 error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
722 switch (bret->operation) { 724 switch (bret->operation) {
725 case BLKIF_OP_FLUSH_DISKCACHE:
723 case BLKIF_OP_WRITE_BARRIER: 726 case BLKIF_OP_WRITE_BARRIER:
724 if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { 727 if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
725 printk(KERN_WARNING "blkfront: %s: write barrier op failed\n", 728 printk(KERN_WARNING "blkfront: %s: write %s op failed\n",
729 info->flush_op == BLKIF_OP_WRITE_BARRIER ?
730 "barrier" : "flush disk cache",
726 info->gd->disk_name); 731 info->gd->disk_name);
727 error = -EOPNOTSUPP; 732 error = -EOPNOTSUPP;
728 } 733 }
729 if (unlikely(bret->status == BLKIF_RSP_ERROR && 734 if (unlikely(bret->status == BLKIF_RSP_ERROR &&
730 info->shadow[id].req.nr_segments == 0)) { 735 info->shadow[id].req.nr_segments == 0)) {
731 printk(KERN_WARNING "blkfront: %s: empty write barrier op failed\n", 736 printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
737 info->flush_op == BLKIF_OP_WRITE_BARRIER ?
738 "barrier" : "flush disk cache",
732 info->gd->disk_name); 739 info->gd->disk_name);
733 error = -EOPNOTSUPP; 740 error = -EOPNOTSUPP;
734 } 741 }
@@ -736,6 +743,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
736 if (error == -EOPNOTSUPP) 743 if (error == -EOPNOTSUPP)
737 error = 0; 744 error = 0;
738 info->feature_flush = 0; 745 info->feature_flush = 0;
746 info->flush_op = 0;
739 xlvbd_flush(info); 747 xlvbd_flush(info);
740 } 748 }
741 /* fall through */ 749 /* fall through */
@@ -1100,7 +1108,7 @@ static void blkfront_connect(struct blkfront_info *info)
1100 unsigned long sector_size; 1108 unsigned long sector_size;
1101 unsigned int binfo; 1109 unsigned int binfo;
1102 int err; 1110 int err;
1103 int barrier; 1111 int barrier, flush;
1104 1112
1105 switch (info->connected) { 1113 switch (info->connected) {
1106 case BLKIF_STATE_CONNECTED: 1114 case BLKIF_STATE_CONNECTED:
@@ -1140,8 +1148,11 @@ static void blkfront_connect(struct blkfront_info *info)
1140 return; 1148 return;
1141 } 1149 }
1142 1150
1151 info->feature_flush = 0;
1152 info->flush_op = 0;
1153
1143 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, 1154 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1144 "feature-barrier", "%lu", &barrier, 1155 "feature-barrier", "%d", &barrier,
1145 NULL); 1156 NULL);
1146 1157
1147 /* 1158 /*
@@ -1151,11 +1162,23 @@ static void blkfront_connect(struct blkfront_info *info)
1151 * 1162 *
1152 * If there are barriers, then we use flush. 1163 * If there are barriers, then we use flush.
1153 */ 1164 */
1154 info->feature_flush = 0; 1165 if (!err && barrier) {
1155
1156 if (!err && barrier)
1157 info->feature_flush = REQ_FLUSH | REQ_FUA; 1166 info->feature_flush = REQ_FLUSH | REQ_FUA;
1167 info->flush_op = BLKIF_OP_WRITE_BARRIER;
1168 }
1169 /*
1170 * And if there is "feature-flush-cache" use that above
1171 * barriers.
1172 */
1173 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1174 "feature-flush-cache", "%d", &flush,
1175 NULL);
1158 1176
1177 if (!err && flush) {
1178 info->feature_flush = REQ_FLUSH;
1179 info->flush_op = BLKIF_OP_FLUSH_DISKCACHE;
1180 }
1181
1159 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); 1182 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
1160 if (err) { 1183 if (err) {
1161 xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", 1184 xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index e427fbe45999..ae15a4ddaa9b 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -625,7 +625,9 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
625 blk_queue_max_hw_sectors(q, 4096 / 512); 625 blk_queue_max_hw_sectors(q, 4096 / 512);
626 gendisk->queue = q; 626 gendisk->queue = q;
627 gendisk->fops = &viocd_fops; 627 gendisk->fops = &viocd_fops;
628 gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE; 628 gendisk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE |
629 GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
630 gendisk->events = DISK_EVENT_MEDIA_CHANGE;
629 set_capacity(gendisk, 0); 631 set_capacity(gendisk, 0);
630 gendisk->private_data = d; 632 gendisk->private_data = d;
631 d->viocd_disk = gendisk; 633 d->viocd_disk = gendisk;
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index c7f1a6f16b6e..e2fc2d21fa61 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -39,3 +39,5 @@ obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
39 39
40##################################################################################d 40##################################################################################d
41 41
42# ARM SoC drivers
43obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o
diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/db8500-cpufreq.c
new file mode 100644
index 000000000000..d90456a809f9
--- /dev/null
+++ b/drivers/cpufreq/db8500-cpufreq.c
@@ -0,0 +1,169 @@
1/*
2 * Copyright (C) STMicroelectronics 2009
3 * Copyright (C) ST-Ericsson SA 2010
4 *
5 * License Terms: GNU General Public License v2
6 * Author: Sundar Iyer <sundar.iyer@stericsson.com>
7 * Author: Martin Persson <martin.persson@stericsson.com>
8 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
9 *
10 */
11#include <linux/kernel.h>
12#include <linux/cpufreq.h>
13#include <linux/delay.h>
14#include <linux/slab.h>
15#include <linux/mfd/db8500-prcmu.h>
16#include <mach/id.h>
17
18static struct cpufreq_frequency_table freq_table[] = {
19 [0] = {
20 .index = 0,
21 .frequency = 300000,
22 },
23 [1] = {
24 .index = 1,
25 .frequency = 600000,
26 },
27 [2] = {
28 /* Used for MAX_OPP, if available */
29 .index = 2,
30 .frequency = CPUFREQ_TABLE_END,
31 },
32 [3] = {
33 .index = 3,
34 .frequency = CPUFREQ_TABLE_END,
35 },
36};
37
38static enum arm_opp idx2opp[] = {
39 ARM_50_OPP,
40 ARM_100_OPP,
41 ARM_MAX_OPP
42};
43
44static struct freq_attr *db8500_cpufreq_attr[] = {
45 &cpufreq_freq_attr_scaling_available_freqs,
46 NULL,
47};
48
49static int db8500_cpufreq_verify_speed(struct cpufreq_policy *policy)
50{
51 return cpufreq_frequency_table_verify(policy, freq_table);
52}
53
54static int db8500_cpufreq_target(struct cpufreq_policy *policy,
55 unsigned int target_freq,
56 unsigned int relation)
57{
58 struct cpufreq_freqs freqs;
59 unsigned int idx;
60
61 /* scale the target frequency to one of the extremes supported */
62 if (target_freq < policy->cpuinfo.min_freq)
63 target_freq = policy->cpuinfo.min_freq;
64 if (target_freq > policy->cpuinfo.max_freq)
65 target_freq = policy->cpuinfo.max_freq;
66
67 /* Lookup the next frequency */
68 if (cpufreq_frequency_table_target
69 (policy, freq_table, target_freq, relation, &idx)) {
70 return -EINVAL;
71 }
72
73 freqs.old = policy->cur;
74 freqs.new = freq_table[idx].frequency;
75 freqs.cpu = policy->cpu;
76
77 if (freqs.old == freqs.new)
78 return 0;
79
80 /* pre-change notification */
81 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
82
83 /* request the PRCM unit for opp change */
84 if (prcmu_set_arm_opp(idx2opp[idx])) {
85 pr_err("db8500-cpufreq: Failed to set OPP level\n");
86 return -EINVAL;
87 }
88
89 /* post change notification */
90 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
91
92 return 0;
93}
94
95static unsigned int db8500_cpufreq_getspeed(unsigned int cpu)
96{
97 int i;
98 /* request the prcm to get the current ARM opp */
99 for (i = 0; prcmu_get_arm_opp() != idx2opp[i]; i++)
100 ;
101 return freq_table[i].frequency;
102}
103
104static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
105{
106 int res;
107 int i;
108
109 BUILD_BUG_ON(ARRAY_SIZE(idx2opp) + 1 != ARRAY_SIZE(freq_table));
110
111 if (cpu_is_u8500v2() && !prcmu_is_u8400()) {
112 freq_table[0].frequency = 400000;
113 freq_table[1].frequency = 800000;
114 if (prcmu_has_arm_maxopp())
115 freq_table[2].frequency = 1000000;
116 }
117
118 /* get policy fields based on the table */
119 res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
120 if (!res)
121 cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
122 else {
123 pr_err("db8500-cpufreq : Failed to read policy table\n");
124 return res;
125 }
126
127 policy->min = policy->cpuinfo.min_freq;
128 policy->max = policy->cpuinfo.max_freq;
129 policy->cur = db8500_cpufreq_getspeed(policy->cpu);
130
131 for (i = 0; freq_table[i].frequency != policy->cur; i++)
132 ;
133
134 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
135
136 /*
137 * FIXME : Need to take time measurement across the target()
138 * function with no/some/all drivers in the notification
139 * list.
140 */
141 policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
142
143 /* policy sharing between dual CPUs */
144 cpumask_copy(policy->cpus, &cpu_present_map);
145
146 policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
147
148 return 0;
149}
150
151static struct cpufreq_driver db8500_cpufreq_driver = {
152 .flags = CPUFREQ_STICKY,
153 .verify = db8500_cpufreq_verify_speed,
154 .target = db8500_cpufreq_target,
155 .get = db8500_cpufreq_getspeed,
156 .init = db8500_cpufreq_init,
157 .name = "DB8500",
158 .attr = db8500_cpufreq_attr,
159};
160
161static int __init db8500_cpufreq_register(void)
162{
163 if (!cpu_is_u8500v20_or_later())
164 return -ENODEV;
165
166 pr_info("cpufreq for DB8500 started\n");
167 return cpufreq_register_driver(&db8500_cpufreq_driver);
168}
169device_initcall(db8500_cpufreq_register);
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index dcc1b2139fff..636e40925b16 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -213,12 +213,17 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
213 struct sh_dmae_device, common); 213 struct sh_dmae_device, common);
214 struct sh_dmae_pdata *pdata = shdev->pdata; 214 struct sh_dmae_pdata *pdata = shdev->pdata;
215 const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; 215 const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
216 u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16); 216 u16 __iomem *addr = shdev->dmars;
217 int shift = chan_pdata->dmars_bit; 217 int shift = chan_pdata->dmars_bit;
218 218
219 if (dmae_is_busy(sh_chan)) 219 if (dmae_is_busy(sh_chan))
220 return -EBUSY; 220 return -EBUSY;
221 221
222 /* in the case of a missing DMARS resource use first memory window */
223 if (!addr)
224 addr = (u16 __iomem *)shdev->chan_reg;
225 addr += chan_pdata->dmars / sizeof(u16);
226
222 __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), 227 __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
223 addr); 228 addr);
224 229
@@ -1078,7 +1083,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1078 unsigned long irqflags = IRQF_DISABLED, 1083 unsigned long irqflags = IRQF_DISABLED,
1079 chan_flag[SH_DMAC_MAX_CHANNELS] = {}; 1084 chan_flag[SH_DMAC_MAX_CHANNELS] = {};
1080 int errirq, chan_irq[SH_DMAC_MAX_CHANNELS]; 1085 int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
1081 int err, i, irq_cnt = 0, irqres = 0; 1086 int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
1082 struct sh_dmae_device *shdev; 1087 struct sh_dmae_device *shdev;
1083 struct resource *chan, *dmars, *errirq_res, *chanirq_res; 1088 struct resource *chan, *dmars, *errirq_res, *chanirq_res;
1084 1089
@@ -1087,7 +1092,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1087 return -ENODEV; 1092 return -ENODEV;
1088 1093
1089 chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1094 chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1090 /* DMARS area is optional, if absent, this controller cannot do slave DMA */ 1095 /* DMARS area is optional */
1091 dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1096 dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1092 /* 1097 /*
1093 * IRQ resources: 1098 * IRQ resources:
@@ -1154,7 +1159,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1154 INIT_LIST_HEAD(&shdev->common.channels); 1159 INIT_LIST_HEAD(&shdev->common.channels);
1155 1160
1156 dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); 1161 dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
1157 if (dmars) 1162 if (pdata->slave && pdata->slave_num)
1158 dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); 1163 dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
1159 1164
1160 shdev->common.device_alloc_chan_resources 1165 shdev->common.device_alloc_chan_resources
@@ -1203,8 +1208,13 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1203 !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { 1208 !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
1204 /* Special case - all multiplexed */ 1209 /* Special case - all multiplexed */
1205 for (; irq_cnt < pdata->channel_num; irq_cnt++) { 1210 for (; irq_cnt < pdata->channel_num; irq_cnt++) {
1206 chan_irq[irq_cnt] = chanirq_res->start; 1211 if (irq_cnt < SH_DMAC_MAX_CHANNELS) {
1207 chan_flag[irq_cnt] = IRQF_SHARED; 1212 chan_irq[irq_cnt] = chanirq_res->start;
1213 chan_flag[irq_cnt] = IRQF_SHARED;
1214 } else {
1215 irq_cap = 1;
1216 break;
1217 }
1208 } 1218 }
1209 } else { 1219 } else {
1210 do { 1220 do {
@@ -1218,22 +1228,32 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1218 "Found IRQ %d for channel %d\n", 1228 "Found IRQ %d for channel %d\n",
1219 i, irq_cnt); 1229 i, irq_cnt);
1220 chan_irq[irq_cnt++] = i; 1230 chan_irq[irq_cnt++] = i;
1231
1232 if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
1233 break;
1234 }
1235
1236 if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
1237 irq_cap = 1;
1238 break;
1221 } 1239 }
1222 chanirq_res = platform_get_resource(pdev, 1240 chanirq_res = platform_get_resource(pdev,
1223 IORESOURCE_IRQ, ++irqres); 1241 IORESOURCE_IRQ, ++irqres);
1224 } while (irq_cnt < pdata->channel_num && chanirq_res); 1242 } while (irq_cnt < pdata->channel_num && chanirq_res);
1225 } 1243 }
1226 1244
1227 if (irq_cnt < pdata->channel_num)
1228 goto eirqres;
1229
1230 /* Create DMA Channel */ 1245 /* Create DMA Channel */
1231 for (i = 0; i < pdata->channel_num; i++) { 1246 for (i = 0; i < irq_cnt; i++) {
1232 err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); 1247 err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
1233 if (err) 1248 if (err)
1234 goto chan_probe_err; 1249 goto chan_probe_err;
1235 } 1250 }
1236 1251
1252 if (irq_cap)
1253 dev_notice(&pdev->dev, "Attempting to register %d DMA "
1254 "channels when a maximum of %d are supported.\n",
1255 pdata->channel_num, SH_DMAC_MAX_CHANNELS);
1256
1237 pm_runtime_put(&pdev->dev); 1257 pm_runtime_put(&pdev->dev);
1238 1258
1239 platform_set_drvdata(pdev, shdev); 1259 platform_set_drvdata(pdev, shdev);
@@ -1243,7 +1263,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1243 1263
1244chan_probe_err: 1264chan_probe_err:
1245 sh_dmae_chan_remove(shdev); 1265 sh_dmae_chan_remove(shdev);
1246eirqres: 1266
1247#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) 1267#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
1248 free_irq(errirq, shdev); 1268 free_irq(errirq, shdev);
1249eirq_err: 1269eirq_err:
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 3f9d3cd06584..5ae9fc512180 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -17,7 +17,7 @@
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/list.h> 18#include <linux/list.h>
19 19
20#define SH_DMAC_MAX_CHANNELS 6 20#define SH_DMAC_MAX_CHANNELS 20
21#define SH_DMA_SLAVE_NUMBER 256 21#define SH_DMA_SLAVE_NUMBER 256
22#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ 22#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */
23 23
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index d41f9002da45..aa08497a075a 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -101,6 +101,19 @@ struct i3200_priv {
101 101
102static int nr_channels; 102static int nr_channels;
103 103
104#ifndef readq
105static inline __u64 readq(const volatile void __iomem *addr)
106{
107 const volatile u32 __iomem *p = addr;
108 u32 low, high;
109
110 low = readl(p);
111 high = readl(p + 1);
112
113 return low + ((u64)high << 32);
114}
115#endif
116
104static int how_many_channels(struct pci_dev *pdev) 117static int how_many_channels(struct pci_dev *pdev)
105{ 118{
106 unsigned char capid0_8b; /* 8th byte of CAPID0 */ 119 unsigned char capid0_8b; /* 8th byte of CAPID0 */
diff --git a/drivers/gpio/ml_ioh_gpio.c b/drivers/gpio/ml_ioh_gpio.c
index 0a775f7987c2..1bc621ac3536 100644
--- a/drivers/gpio/ml_ioh_gpio.c
+++ b/drivers/gpio/ml_ioh_gpio.c
@@ -15,6 +15,7 @@
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
16 */ 16 */
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/slab.h>
18#include <linux/pci.h> 19#include <linux/pci.h>
19#include <linux/gpio.h> 20#include <linux/gpio.h>
20 21
@@ -138,6 +139,7 @@ static int ioh_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
138 return 0; 139 return 0;
139} 140}
140 141
142#ifdef CONFIG_PM
141/* 143/*
142 * Save register configuration and disable interrupts. 144 * Save register configuration and disable interrupts.
143 */ 145 */
@@ -157,6 +159,7 @@ static void ioh_gpio_restore_reg_conf(struct ioh_gpio *chip)
157 /* to store contents of PM register */ 159 /* to store contents of PM register */
158 iowrite32(chip->ioh_gpio_reg.pm_reg, &chip->reg->regs[chip->ch].pm); 160 iowrite32(chip->ioh_gpio_reg.pm_reg, &chip->reg->regs[chip->ch].pm);
159} 161}
162#endif
160 163
161static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port) 164static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port)
162{ 165{
diff --git a/drivers/gpio/vx855_gpio.c b/drivers/gpio/vx855_gpio.c
index 8a98ee5d5f6c..ef5aabd8b8b7 100644
--- a/drivers/gpio/vx855_gpio.c
+++ b/drivers/gpio/vx855_gpio.c
@@ -26,6 +26,7 @@
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/gpio.h> 28#include <linux/gpio.h>
29#include <linux/slab.h>
29#include <linux/device.h> 30#include <linux/device.h>
30#include <linux/platform_device.h> 31#include <linux/platform_device.h>
31#include <linux/pci.h> 32#include <linux/pci.h>
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c6289034e29a..0b2e167d2bce 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -56,9 +56,7 @@ static int i915_gem_phys_pwrite(struct drm_device *dev,
56static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj); 56static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
57 57
58static int i915_gem_inactive_shrink(struct shrinker *shrinker, 58static int i915_gem_inactive_shrink(struct shrinker *shrinker,
59 int nr_to_scan, 59 struct shrink_control *sc);
60 gfp_t gfp_mask);
61
62 60
63/* some bookkeeping */ 61/* some bookkeeping */
64static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, 62static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
@@ -4092,9 +4090,7 @@ i915_gpu_is_active(struct drm_device *dev)
4092} 4090}
4093 4091
4094static int 4092static int
4095i915_gem_inactive_shrink(struct shrinker *shrinker, 4093i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4096 int nr_to_scan,
4097 gfp_t gfp_mask)
4098{ 4094{
4099 struct drm_i915_private *dev_priv = 4095 struct drm_i915_private *dev_priv =
4100 container_of(shrinker, 4096 container_of(shrinker,
@@ -4102,6 +4098,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker,
4102 mm.inactive_shrinker); 4098 mm.inactive_shrinker);
4103 struct drm_device *dev = dev_priv->dev; 4099 struct drm_device *dev = dev_priv->dev;
4104 struct drm_i915_gem_object *obj, *next; 4100 struct drm_i915_gem_object *obj, *next;
4101 int nr_to_scan = sc->nr_to_scan;
4105 int cnt; 4102 int cnt;
4106 4103
4107 if (!mutex_trylock(&dev->struct_mutex)) 4104 if (!mutex_trylock(&dev->struct_mutex))
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 9d9d92945f8c..d948575717bf 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -395,12 +395,14 @@ static int ttm_pool_get_num_unused_pages(void)
395/** 395/**
396 * Callback for mm to request pool to reduce number of page held. 396 * Callback for mm to request pool to reduce number of page held.
397 */ 397 */
398static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask) 398static int ttm_pool_mm_shrink(struct shrinker *shrink,
399 struct shrink_control *sc)
399{ 400{
400 static atomic_t start_pool = ATOMIC_INIT(0); 401 static atomic_t start_pool = ATOMIC_INIT(0);
401 unsigned i; 402 unsigned i;
402 unsigned pool_offset = atomic_add_return(1, &start_pool); 403 unsigned pool_offset = atomic_add_return(1, &start_pool);
403 struct ttm_page_pool *pool; 404 struct ttm_page_pool *pool;
405 int shrink_pages = sc->nr_to_scan;
404 406
405 pool_offset = pool_offset % NUM_POOLS; 407 pool_offset = pool_offset % NUM_POOLS;
406 /* select start pool in round robin fashion */ 408 /* select start pool in round robin fashion */
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index a5ec5a7cb381..6e5123b1d341 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1781,7 +1781,8 @@ static int ide_cd_probe(ide_drive_t *drive)
1781 1781
1782 ide_cd_read_toc(drive, &sense); 1782 ide_cd_read_toc(drive, &sense);
1783 g->fops = &idecd_ops; 1783 g->fops = &idecd_ops;
1784 g->flags |= GENHD_FL_REMOVABLE; 1784 g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
1785 g->events = DISK_EVENT_MEDIA_CHANGE;
1785 add_disk(g); 1786 add_disk(g);
1786 return 0; 1787 return 0;
1787 1788
diff --git a/drivers/input/input-compat.h b/drivers/input/input-compat.h
index 4d8ea32e8a00..22be27b424de 100644
--- a/drivers/input/input-compat.h
+++ b/drivers/input/input-compat.h
@@ -19,7 +19,7 @@
19 19
20/* Note to the author of this code: did it ever occur to 20/* Note to the author of this code: did it ever occur to
21 you why the ifdefs are needed? Think about it again. -AK */ 21 you why the ifdefs are needed? Think about it again. -AK */
22#ifdef CONFIG_X86_64 22#if defined(CONFIG_X86_64) || defined(CONFIG_TILE)
23# define INPUT_COMPAT_TEST is_compat_task() 23# define INPUT_COMPAT_TEST is_compat_task()
24#elif defined(CONFIG_S390) 24#elif defined(CONFIG_S390)
25# define INPUT_COMPAT_TEST test_thread_flag(TIF_31BIT) 25# define INPUT_COMPAT_TEST test_thread_flag(TIF_31BIT)
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 9bec8699b8a3..1d027b475b22 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -14,6 +14,13 @@ config LEDS_CLASS
14 This option enables the led sysfs class in /sys/class/leds. You'll 14 This option enables the led sysfs class in /sys/class/leds. You'll
15 need this to do anything useful with LEDs. If unsure, say N. 15 need this to do anything useful with LEDs. If unsure, say N.
16 16
17config LEDS_GPIO_REGISTER
18 bool
19 help
20 This option provides the function gpio_led_register_device.
21 As this function is used by arch code it must not be compiled as a
22 module.
23
17if NEW_LEDS 24if NEW_LEDS
18 25
19comment "LED drivers" 26comment "LED drivers"
@@ -115,13 +122,6 @@ config LEDS_ALIX2
115 This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs. 122 This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs.
116 You have to set leds-alix2.force=1 for boards with Award BIOS. 123 You have to set leds-alix2.force=1 for boards with Award BIOS.
117 124
118config LEDS_H1940
119 tristate "LED Support for iPAQ H1940 device"
120 depends on LEDS_CLASS
121 depends on ARCH_H1940
122 help
123 This option enables support for the LEDs on the h1940.
124
125config LEDS_COBALT_QUBE 125config LEDS_COBALT_QUBE
126 tristate "LED Support for the Cobalt Qube series front LED" 126 tristate "LED Support for the Cobalt Qube series front LED"
127 depends on LEDS_CLASS 127 depends on LEDS_CLASS
@@ -162,6 +162,16 @@ config LEDS_PCA9532
162 LED controller. It is generally only useful 162 LED controller. It is generally only useful
163 as a platform driver 163 as a platform driver
164 164
165config LEDS_PCA9532_GPIO
166 bool "Enable GPIO support for PCA9532"
167 depends on LEDS_PCA9532
168 depends on GPIOLIB
169 help
170 Allow unused pins on PCA9532 to be used as gpio.
171
172 To use a pin as gpio pca9532_type in pca9532_platform data needs to
173 set to PCA9532_TYPE_GPIO.
174
165config LEDS_GPIO 175config LEDS_GPIO
166 tristate "LED Support for GPIO connected LEDs" 176 tristate "LED Support for GPIO connected LEDs"
167 depends on LEDS_CLASS 177 depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 39c80fca84d2..bccb96c9bb45 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -17,11 +17,11 @@ obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
17obj-$(CONFIG_LEDS_NET5501) += leds-net5501.o 17obj-$(CONFIG_LEDS_NET5501) += leds-net5501.o
18obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o 18obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o
19obj-$(CONFIG_LEDS_ALIX2) += leds-alix2.o 19obj-$(CONFIG_LEDS_ALIX2) += leds-alix2.o
20obj-$(CONFIG_LEDS_H1940) += leds-h1940.o
21obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o 20obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o
22obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o 21obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
23obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o 22obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
24obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o 23obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
24obj-$(CONFIG_LEDS_GPIO_REGISTER) += leds-gpio-register.o
25obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o 25obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
26obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o 26obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
27obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o 27obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index d5a4ade88991..dc3d3d83191a 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -131,7 +131,8 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
131 if (!led_cdev->blink_brightness) 131 if (!led_cdev->blink_brightness)
132 led_cdev->blink_brightness = led_cdev->max_brightness; 132 led_cdev->blink_brightness = led_cdev->max_brightness;
133 133
134 if (delay_on == led_cdev->blink_delay_on && 134 if (led_get_trigger_data(led_cdev) &&
135 delay_on == led_cdev->blink_delay_on &&
135 delay_off == led_cdev->blink_delay_off) 136 delay_off == led_cdev->blink_delay_off)
136 return; 137 return;
137 138
diff --git a/drivers/leds/leds-gpio-register.c b/drivers/leds/leds-gpio-register.c
new file mode 100644
index 000000000000..1c4ed5510f35
--- /dev/null
+++ b/drivers/leds/leds-gpio-register.c
@@ -0,0 +1,42 @@
1/*
2 * Copyright (C) 2011 Pengutronix
3 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
4 *
5 * This program is free software; you can redistribute it and/or modify it under
6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation.
8 */
9#include <linux/err.h>
10#include <linux/platform_device.h>
11#include <linux/slab.h>
12#include <linux/leds.h>
13
14/**
15 * gpio_led_register_device - register a gpio-led device
16 * @pdata: the platform data used for the new device
17 *
18 * Makes a copy of pdata and pdata->leds and registers a new leds-gpio device
19 * with the result. This allows to have pdata and pdata-leds in .init.rodata
20 * and so saves some bytes compared to a static struct platform_device with
21 * static platform data.
22 *
23 * Returns the registered device or an error pointer.
24 */
25struct platform_device *__init gpio_led_register_device(
26 int id, const struct gpio_led_platform_data *pdata)
27{
28 struct platform_device *ret;
29 struct gpio_led_platform_data _pdata = *pdata;
30
31 _pdata.leds = kmemdup(pdata->leds,
32 pdata->num_leds * sizeof(*pdata->leds), GFP_KERNEL);
33 if (!_pdata.leds)
34 return ERR_PTR(-ENOMEM);
35
36 ret = platform_device_register_resndata(NULL, "leds-gpio", id,
37 NULL, 0, &_pdata, sizeof(_pdata));
38 if (IS_ERR(ret))
39 kfree(_pdata.leds);
40
41 return ret;
42}
diff --git a/drivers/leds/leds-h1940.c b/drivers/leds/leds-h1940.c
deleted file mode 100644
index 173d104d9ff2..000000000000
--- a/drivers/leds/leds-h1940.c
+++ /dev/null
@@ -1,170 +0,0 @@
1/*
2 * drivers/leds/leds-h1940.c
3 * Copyright (c) Arnaud Patard <arnaud.patard@rtp-net.org>
4 *
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file COPYING in the main directory of this archive for
7 * more details.
8 *
9 * H1940 leds driver
10 *
11 */
12
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/delay.h>
16#include <linux/string.h>
17#include <linux/ctype.h>
18#include <linux/leds.h>
19#include <linux/gpio.h>
20
21#include <mach/regs-gpio.h>
22#include <mach/hardware.h>
23#include <mach/h1940-latch.h>
24
25/*
26 * Green led.
27 */
28static void h1940_greenled_set(struct led_classdev *led_dev,
29 enum led_brightness value)
30{
31 switch (value) {
32 case LED_HALF:
33 h1940_latch_control(0, H1940_LATCH_LED_FLASH);
34 s3c2410_gpio_setpin(S3C2410_GPA7, 1);
35 break;
36 case LED_FULL:
37 h1940_latch_control(0, H1940_LATCH_LED_GREEN);
38 s3c2410_gpio_setpin(S3C2410_GPA7, 1);
39 break;
40 default:
41 case LED_OFF:
42 h1940_latch_control(H1940_LATCH_LED_FLASH, 0);
43 h1940_latch_control(H1940_LATCH_LED_GREEN, 0);
44 s3c2410_gpio_setpin(S3C2410_GPA7, 0);
45 break;
46 }
47}
48
49static struct led_classdev h1940_greenled = {
50 .name = "h1940:green",
51 .brightness_set = h1940_greenled_set,
52 .default_trigger = "h1940-charger",
53};
54
55/*
56 * Red led.
57 */
58static void h1940_redled_set(struct led_classdev *led_dev,
59 enum led_brightness value)
60{
61 switch (value) {
62 case LED_HALF:
63 h1940_latch_control(0, H1940_LATCH_LED_FLASH);
64 s3c2410_gpio_setpin(S3C2410_GPA1, 1);
65 break;
66 case LED_FULL:
67 h1940_latch_control(0, H1940_LATCH_LED_RED);
68 s3c2410_gpio_setpin(S3C2410_GPA1, 1);
69 break;
70 default:
71 case LED_OFF:
72 h1940_latch_control(H1940_LATCH_LED_FLASH, 0);
73 h1940_latch_control(H1940_LATCH_LED_RED, 0);
74 s3c2410_gpio_setpin(S3C2410_GPA1, 0);
75 break;
76 }
77}
78
79static struct led_classdev h1940_redled = {
80 .name = "h1940:red",
81 .brightness_set = h1940_redled_set,
82 .default_trigger = "h1940-charger",
83};
84
85/*
86 * Blue led.
87 * (it can only be blue flashing led)
88 */
89static void h1940_blueled_set(struct led_classdev *led_dev,
90 enum led_brightness value)
91{
92 if (value) {
93 /* flashing Blue */
94 h1940_latch_control(0, H1940_LATCH_LED_FLASH);
95 s3c2410_gpio_setpin(S3C2410_GPA3, 1);
96 } else {
97 h1940_latch_control(H1940_LATCH_LED_FLASH, 0);
98 s3c2410_gpio_setpin(S3C2410_GPA3, 0);
99 }
100
101}
102
103static struct led_classdev h1940_blueled = {
104 .name = "h1940:blue",
105 .brightness_set = h1940_blueled_set,
106 .default_trigger = "h1940-bluetooth",
107};
108
109static int __devinit h1940leds_probe(struct platform_device *pdev)
110{
111 int ret;
112
113 ret = led_classdev_register(&pdev->dev, &h1940_greenled);
114 if (ret)
115 goto err_green;
116
117 ret = led_classdev_register(&pdev->dev, &h1940_redled);
118 if (ret)
119 goto err_red;
120
121 ret = led_classdev_register(&pdev->dev, &h1940_blueled);
122 if (ret)
123 goto err_blue;
124
125 return 0;
126
127err_blue:
128 led_classdev_unregister(&h1940_redled);
129err_red:
130 led_classdev_unregister(&h1940_greenled);
131err_green:
132 return ret;
133}
134
135static int h1940leds_remove(struct platform_device *pdev)
136{
137 led_classdev_unregister(&h1940_greenled);
138 led_classdev_unregister(&h1940_redled);
139 led_classdev_unregister(&h1940_blueled);
140 return 0;
141}
142
143
144static struct platform_driver h1940leds_driver = {
145 .driver = {
146 .name = "h1940-leds",
147 .owner = THIS_MODULE,
148 },
149 .probe = h1940leds_probe,
150 .remove = h1940leds_remove,
151};
152
153
154static int __init h1940leds_init(void)
155{
156 return platform_driver_register(&h1940leds_driver);
157}
158
159static void __exit h1940leds_exit(void)
160{
161 platform_driver_unregister(&h1940leds_driver);
162}
163
164module_init(h1940leds_init);
165module_exit(h1940leds_exit);
166
167MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
168MODULE_DESCRIPTION("LED driver for the iPAQ H1940");
169MODULE_LICENSE("GPL");
170MODULE_ALIAS("platform:h1940-leds");
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index b37e6186d0fa..4d7ce7631acf 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -17,6 +17,7 @@
17#include <linux/input.h> 17#include <linux/input.h>
18#include <linux/led-lm3530.h> 18#include <linux/led-lm3530.h>
19#include <linux/types.h> 19#include <linux/types.h>
20#include <linux/regulator/consumer.h>
20 21
21#define LM3530_LED_DEV "lcd-backlight" 22#define LM3530_LED_DEV "lcd-backlight"
22#define LM3530_NAME "lm3530-led" 23#define LM3530_NAME "lm3530-led"
@@ -96,12 +97,18 @@ static struct lm3530_mode_map mode_map[] = {
96 * @client: i2c client 97 * @client: i2c client
97 * @pdata: LM3530 platform data 98 * @pdata: LM3530 platform data
98 * @mode: mode of operation - manual, ALS, PWM 99 * @mode: mode of operation - manual, ALS, PWM
100 * @regulator: regulator
101 * @brighness: previous brightness value
102 * @enable: regulator is enabled
99 */ 103 */
100struct lm3530_data { 104struct lm3530_data {
101 struct led_classdev led_dev; 105 struct led_classdev led_dev;
102 struct i2c_client *client; 106 struct i2c_client *client;
103 struct lm3530_platform_data *pdata; 107 struct lm3530_platform_data *pdata;
104 enum lm3530_mode mode; 108 enum lm3530_mode mode;
109 struct regulator *regulator;
110 enum led_brightness brightness;
111 bool enable;
105}; 112};
106 113
107static const u8 lm3530_reg[LM3530_REG_MAX] = { 114static const u8 lm3530_reg[LM3530_REG_MAX] = {
@@ -172,7 +179,10 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
172 brt_ramp = (pltfm->brt_ramp_fall << LM3530_BRT_RAMP_FALL_SHIFT) | 179 brt_ramp = (pltfm->brt_ramp_fall << LM3530_BRT_RAMP_FALL_SHIFT) |
173 (pltfm->brt_ramp_rise << LM3530_BRT_RAMP_RISE_SHIFT); 180 (pltfm->brt_ramp_rise << LM3530_BRT_RAMP_RISE_SHIFT);
174 181
175 brightness = pltfm->brt_val; 182 if (drvdata->brightness)
183 brightness = drvdata->brightness;
184 else
185 brightness = drvdata->brightness = pltfm->brt_val;
176 186
177 reg_val[0] = gen_config; /* LM3530_GEN_CONFIG */ 187 reg_val[0] = gen_config; /* LM3530_GEN_CONFIG */
178 reg_val[1] = als_config; /* LM3530_ALS_CONFIG */ 188 reg_val[1] = als_config; /* LM3530_ALS_CONFIG */
@@ -190,6 +200,16 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
190 reg_val[13] = LM3530_DEF_ZT_3; /* LM3530_ALS_Z3T_REG */ 200 reg_val[13] = LM3530_DEF_ZT_3; /* LM3530_ALS_Z3T_REG */
191 reg_val[14] = LM3530_DEF_ZT_4; /* LM3530_ALS_Z4T_REG */ 201 reg_val[14] = LM3530_DEF_ZT_4; /* LM3530_ALS_Z4T_REG */
192 202
203 if (!drvdata->enable) {
204 ret = regulator_enable(drvdata->regulator);
205 if (ret) {
206 dev_err(&drvdata->client->dev,
207 "Enable regulator failed\n");
208 return ret;
209 }
210 drvdata->enable = true;
211 }
212
193 for (i = 0; i < LM3530_REG_MAX; i++) { 213 for (i = 0; i < LM3530_REG_MAX; i++) {
194 ret = i2c_smbus_write_byte_data(client, 214 ret = i2c_smbus_write_byte_data(client,
195 lm3530_reg[i], reg_val[i]); 215 lm3530_reg[i], reg_val[i]);
@@ -210,12 +230,31 @@ static void lm3530_brightness_set(struct led_classdev *led_cdev,
210 switch (drvdata->mode) { 230 switch (drvdata->mode) {
211 case LM3530_BL_MODE_MANUAL: 231 case LM3530_BL_MODE_MANUAL:
212 232
233 if (!drvdata->enable) {
234 err = lm3530_init_registers(drvdata);
235 if (err) {
236 dev_err(&drvdata->client->dev,
237 "Register Init failed: %d\n", err);
238 break;
239 }
240 }
241
213 /* set the brightness in brightness control register*/ 242 /* set the brightness in brightness control register*/
214 err = i2c_smbus_write_byte_data(drvdata->client, 243 err = i2c_smbus_write_byte_data(drvdata->client,
215 LM3530_BRT_CTRL_REG, brt_val / 2); 244 LM3530_BRT_CTRL_REG, brt_val / 2);
216 if (err) 245 if (err)
217 dev_err(&drvdata->client->dev, 246 dev_err(&drvdata->client->dev,
218 "Unable to set brightness: %d\n", err); 247 "Unable to set brightness: %d\n", err);
248 else
249 drvdata->brightness = brt_val / 2;
250
251 if (brt_val == 0) {
252 err = regulator_disable(drvdata->regulator);
253 if (err)
254 dev_err(&drvdata->client->dev,
255 "Disable regulator failed\n");
256 drvdata->enable = false;
257 }
219 break; 258 break;
220 case LM3530_BL_MODE_ALS: 259 case LM3530_BL_MODE_ALS:
221 break; 260 break;
@@ -297,20 +336,31 @@ static int __devinit lm3530_probe(struct i2c_client *client,
297 drvdata->mode = pdata->mode; 336 drvdata->mode = pdata->mode;
298 drvdata->client = client; 337 drvdata->client = client;
299 drvdata->pdata = pdata; 338 drvdata->pdata = pdata;
339 drvdata->brightness = LED_OFF;
340 drvdata->enable = false;
300 drvdata->led_dev.name = LM3530_LED_DEV; 341 drvdata->led_dev.name = LM3530_LED_DEV;
301 drvdata->led_dev.brightness_set = lm3530_brightness_set; 342 drvdata->led_dev.brightness_set = lm3530_brightness_set;
302 343
303 i2c_set_clientdata(client, drvdata); 344 i2c_set_clientdata(client, drvdata);
304 345
305 err = lm3530_init_registers(drvdata); 346 drvdata->regulator = regulator_get(&client->dev, "vin");
306 if (err < 0) { 347 if (IS_ERR(drvdata->regulator)) {
307 dev_err(&client->dev, "Register Init failed: %d\n", err); 348 dev_err(&client->dev, "regulator get failed\n");
308 err = -ENODEV; 349 err = PTR_ERR(drvdata->regulator);
309 goto err_reg_init; 350 drvdata->regulator = NULL;
351 goto err_regulator_get;
310 } 352 }
311 353
312 err = led_classdev_register((struct device *) 354 if (drvdata->pdata->brt_val) {
313 &client->dev, &drvdata->led_dev); 355 err = lm3530_init_registers(drvdata);
356 if (err < 0) {
357 dev_err(&client->dev,
358 "Register Init failed: %d\n", err);
359 err = -ENODEV;
360 goto err_reg_init;
361 }
362 }
363 err = led_classdev_register(&client->dev, &drvdata->led_dev);
314 if (err < 0) { 364 if (err < 0) {
315 dev_err(&client->dev, "Register led class failed: %d\n", err); 365 dev_err(&client->dev, "Register led class failed: %d\n", err);
316 err = -ENODEV; 366 err = -ENODEV;
@@ -330,6 +380,9 @@ err_create_file:
330 led_classdev_unregister(&drvdata->led_dev); 380 led_classdev_unregister(&drvdata->led_dev);
331err_class_register: 381err_class_register:
332err_reg_init: 382err_reg_init:
383 regulator_put(drvdata->regulator);
384err_regulator_get:
385 i2c_set_clientdata(client, NULL);
333 kfree(drvdata); 386 kfree(drvdata);
334err_out: 387err_out:
335 return err; 388 return err;
@@ -340,6 +393,10 @@ static int __devexit lm3530_remove(struct i2c_client *client)
340 struct lm3530_data *drvdata = i2c_get_clientdata(client); 393 struct lm3530_data *drvdata = i2c_get_clientdata(client);
341 394
342 device_remove_file(drvdata->led_dev.dev, &dev_attr_mode); 395 device_remove_file(drvdata->led_dev.dev, &dev_attr_mode);
396
397 if (drvdata->enable)
398 regulator_disable(drvdata->regulator);
399 regulator_put(drvdata->regulator);
343 led_classdev_unregister(&drvdata->led_dev); 400 led_classdev_unregister(&drvdata->led_dev);
344 kfree(drvdata); 401 kfree(drvdata);
345 return 0; 402 return 0;
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 5bf63af09ddf..d8d3a1e910a1 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -1,13 +1,14 @@
1/* 1/*
2 * pca9532.c - 16-bit Led dimmer 2 * pca9532.c - 16-bit Led dimmer
3 * 3 *
4 * Copyright (C) 2011 Jan Weitzel
4 * Copyright (C) 2008 Riku Voipio 5 * Copyright (C) 2008 Riku Voipio
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License. 9 * the Free Software Foundation; version 2 of the License.
9 * 10 *
10 * Datasheet: http://www.nxp.com/acrobat/datasheets/PCA9532_3.pdf 11 * Datasheet: http://www.nxp.com/documents/data_sheet/PCA9532.pdf
11 * 12 *
12 */ 13 */
13 14
@@ -19,21 +20,32 @@
19#include <linux/mutex.h> 20#include <linux/mutex.h>
20#include <linux/workqueue.h> 21#include <linux/workqueue.h>
21#include <linux/leds-pca9532.h> 22#include <linux/leds-pca9532.h>
23#include <linux/gpio.h>
22 24
23#define PCA9532_REG_PSC(i) (0x2+(i)*2) 25/* m = num_leds*/
24#define PCA9532_REG_PWM(i) (0x3+(i)*2) 26#define PCA9532_REG_INPUT(i) ((i) >> 3)
25#define PCA9532_REG_LS0 0x6 27#define PCA9532_REG_OFFSET(m) ((m) >> 4)
26#define LED_REG(led) ((led>>2)+PCA9532_REG_LS0) 28#define PCA9532_REG_PSC(m, i) (PCA9532_REG_OFFSET(m) + 0x1 + (i) * 2)
27#define LED_NUM(led) (led & 0x3) 29#define PCA9532_REG_PWM(m, i) (PCA9532_REG_OFFSET(m) + 0x2 + (i) * 2)
30#define LED_REG(m, led) (PCA9532_REG_OFFSET(m) + 0x5 + (led >> 2))
31#define LED_NUM(led) (led & 0x3)
28 32
29#define ldev_to_led(c) container_of(c, struct pca9532_led, ldev) 33#define ldev_to_led(c) container_of(c, struct pca9532_led, ldev)
30 34
35struct pca9532_chip_info {
36 u8 num_leds;
37};
38
31struct pca9532_data { 39struct pca9532_data {
32 struct i2c_client *client; 40 struct i2c_client *client;
33 struct pca9532_led leds[16]; 41 struct pca9532_led leds[16];
34 struct mutex update_lock; 42 struct mutex update_lock;
35 struct input_dev *idev; 43 struct input_dev *idev;
36 struct work_struct work; 44 struct work_struct work;
45#ifdef CONFIG_LEDS_PCA9532_GPIO
46 struct gpio_chip gpio;
47#endif
48 const struct pca9532_chip_info *chip_info;
37 u8 pwm[2]; 49 u8 pwm[2];
38 u8 psc[2]; 50 u8 psc[2];
39}; 51};
@@ -42,16 +54,41 @@ static int pca9532_probe(struct i2c_client *client,
42 const struct i2c_device_id *id); 54 const struct i2c_device_id *id);
43static int pca9532_remove(struct i2c_client *client); 55static int pca9532_remove(struct i2c_client *client);
44 56
57enum {
58 pca9530,
59 pca9531,
60 pca9532,
61 pca9533,
62};
63
45static const struct i2c_device_id pca9532_id[] = { 64static const struct i2c_device_id pca9532_id[] = {
46 { "pca9532", 0 }, 65 { "pca9530", pca9530 },
66 { "pca9531", pca9531 },
67 { "pca9532", pca9532 },
68 { "pca9533", pca9533 },
47 { } 69 { }
48}; 70};
49 71
50MODULE_DEVICE_TABLE(i2c, pca9532_id); 72MODULE_DEVICE_TABLE(i2c, pca9532_id);
51 73
74static const struct pca9532_chip_info pca9532_chip_info_tbl[] = {
75 [pca9530] = {
76 .num_leds = 2,
77 },
78 [pca9531] = {
79 .num_leds = 8,
80 },
81 [pca9532] = {
82 .num_leds = 16,
83 },
84 [pca9533] = {
85 .num_leds = 4,
86 },
87};
88
52static struct i2c_driver pca9532_driver = { 89static struct i2c_driver pca9532_driver = {
53 .driver = { 90 .driver = {
54 .name = "pca9532", 91 .name = "pca953x",
55 }, 92 },
56 .probe = pca9532_probe, 93 .probe = pca9532_probe,
57 .remove = pca9532_remove, 94 .remove = pca9532_remove,
@@ -68,7 +105,7 @@ static int pca9532_calcpwm(struct i2c_client *client, int pwm, int blink,
68{ 105{
69 int a = 0, b = 0, i = 0; 106 int a = 0, b = 0, i = 0;
70 struct pca9532_data *data = i2c_get_clientdata(client); 107 struct pca9532_data *data = i2c_get_clientdata(client);
71 for (i = 0; i < 16; i++) { 108 for (i = 0; i < data->chip_info->num_leds; i++) {
72 if (data->leds[i].type == PCA9532_TYPE_LED && 109 if (data->leds[i].type == PCA9532_TYPE_LED &&
73 data->leds[i].state == PCA9532_PWM0+pwm) { 110 data->leds[i].state == PCA9532_PWM0+pwm) {
74 a++; 111 a++;
@@ -92,10 +129,12 @@ static int pca9532_calcpwm(struct i2c_client *client, int pwm, int blink,
92static int pca9532_setpwm(struct i2c_client *client, int pwm) 129static int pca9532_setpwm(struct i2c_client *client, int pwm)
93{ 130{
94 struct pca9532_data *data = i2c_get_clientdata(client); 131 struct pca9532_data *data = i2c_get_clientdata(client);
132 u8 maxleds = data->chip_info->num_leds;
133
95 mutex_lock(&data->update_lock); 134 mutex_lock(&data->update_lock);
96 i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm), 135 i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(maxleds, pwm),
97 data->pwm[pwm]); 136 data->pwm[pwm]);
98 i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm), 137 i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(maxleds, pwm),
99 data->psc[pwm]); 138 data->psc[pwm]);
100 mutex_unlock(&data->update_lock); 139 mutex_unlock(&data->update_lock);
101 return 0; 140 return 0;
@@ -106,15 +145,16 @@ static void pca9532_setled(struct pca9532_led *led)
106{ 145{
107 struct i2c_client *client = led->client; 146 struct i2c_client *client = led->client;
108 struct pca9532_data *data = i2c_get_clientdata(client); 147 struct pca9532_data *data = i2c_get_clientdata(client);
148 u8 maxleds = data->chip_info->num_leds;
109 char reg; 149 char reg;
110 150
111 mutex_lock(&data->update_lock); 151 mutex_lock(&data->update_lock);
112 reg = i2c_smbus_read_byte_data(client, LED_REG(led->id)); 152 reg = i2c_smbus_read_byte_data(client, LED_REG(maxleds, led->id));
113 /* zero led bits */ 153 /* zero led bits */
114 reg = reg & ~(0x3<<LED_NUM(led->id)*2); 154 reg = reg & ~(0x3<<LED_NUM(led->id)*2);
115 /* set the new value */ 155 /* set the new value */
116 reg = reg | (led->state << LED_NUM(led->id)*2); 156 reg = reg | (led->state << LED_NUM(led->id)*2);
117 i2c_smbus_write_byte_data(client, LED_REG(led->id), reg); 157 i2c_smbus_write_byte_data(client, LED_REG(maxleds, led->id), reg);
118 mutex_unlock(&data->update_lock); 158 mutex_unlock(&data->update_lock);
119} 159}
120 160
@@ -183,10 +223,12 @@ static int pca9532_event(struct input_dev *dev, unsigned int type,
183 223
184static void pca9532_input_work(struct work_struct *work) 224static void pca9532_input_work(struct work_struct *work)
185{ 225{
186 struct pca9532_data *data; 226 struct pca9532_data *data =
187 data = container_of(work, struct pca9532_data, work); 227 container_of(work, struct pca9532_data, work);
228 u8 maxleds = data->chip_info->num_leds;
229
188 mutex_lock(&data->update_lock); 230 mutex_lock(&data->update_lock);
189 i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1), 231 i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(maxleds, 1),
190 data->pwm[1]); 232 data->pwm[1]);
191 mutex_unlock(&data->update_lock); 233 mutex_unlock(&data->update_lock);
192} 234}
@@ -200,16 +242,68 @@ static void pca9532_led_work(struct work_struct *work)
200 pca9532_setled(led); 242 pca9532_setled(led);
201} 243}
202 244
203static void pca9532_destroy_devices(struct pca9532_data *data, int n_devs) 245#ifdef CONFIG_LEDS_PCA9532_GPIO
246static int pca9532_gpio_request_pin(struct gpio_chip *gc, unsigned offset)
247{
248 struct pca9532_data *data = container_of(gc, struct pca9532_data, gpio);
249 struct pca9532_led *led = &data->leds[offset];
250
251 if (led->type == PCA9532_TYPE_GPIO)
252 return 0;
253
254 return -EBUSY;
255}
256
257static void pca9532_gpio_set_value(struct gpio_chip *gc, unsigned offset, int val)
258{
259 struct pca9532_data *data = container_of(gc, struct pca9532_data, gpio);
260 struct pca9532_led *led = &data->leds[offset];
261
262 if (val)
263 led->state = PCA9532_ON;
264 else
265 led->state = PCA9532_OFF;
266
267 pca9532_setled(led);
268}
269
270static int pca9532_gpio_get_value(struct gpio_chip *gc, unsigned offset)
271{
272 struct pca9532_data *data = container_of(gc, struct pca9532_data, gpio);
273 unsigned char reg;
274
275 reg = i2c_smbus_read_byte_data(data->client, PCA9532_REG_INPUT(offset));
276
277 return !!(reg & (1 << (offset % 8)));
278}
279
280static int pca9532_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
281{
282 /* To use as input ensure pin is not driven */
283 pca9532_gpio_set_value(gc, offset, 0);
284
285 return 0;
286}
287
288static int pca9532_gpio_direction_output(struct gpio_chip *gc, unsigned offset, int val)
289{
290 pca9532_gpio_set_value(gc, offset, val);
291
292 return 0;
293}
294#endif /* CONFIG_LEDS_PCA9532_GPIO */
295
296static int pca9532_destroy_devices(struct pca9532_data *data, int n_devs)
204{ 297{
205 int i = n_devs; 298 int i = n_devs;
206 299
207 if (!data) 300 if (!data)
208 return; 301 return -EINVAL;
209 302
210 while (--i >= 0) { 303 while (--i >= 0) {
211 switch (data->leds[i].type) { 304 switch (data->leds[i].type) {
212 case PCA9532_TYPE_NONE: 305 case PCA9532_TYPE_NONE:
306 case PCA9532_TYPE_GPIO:
213 break; 307 break;
214 case PCA9532_TYPE_LED: 308 case PCA9532_TYPE_LED:
215 led_classdev_unregister(&data->leds[i].ldev); 309 led_classdev_unregister(&data->leds[i].ldev);
@@ -224,23 +318,38 @@ static void pca9532_destroy_devices(struct pca9532_data *data, int n_devs)
224 break; 318 break;
225 } 319 }
226 } 320 }
321
322#ifdef CONFIG_LEDS_PCA9532_GPIO
323 if (data->gpio.dev) {
324 int err = gpiochip_remove(&data->gpio);
325 if (err) {
326 dev_err(&data->client->dev, "%s failed, %d\n",
327 "gpiochip_remove()", err);
328 return err;
329 }
330 }
331#endif
332
333 return 0;
227} 334}
228 335
229static int pca9532_configure(struct i2c_client *client, 336static int pca9532_configure(struct i2c_client *client,
230 struct pca9532_data *data, struct pca9532_platform_data *pdata) 337 struct pca9532_data *data, struct pca9532_platform_data *pdata)
231{ 338{
232 int i, err = 0; 339 int i, err = 0;
340 int gpios = 0;
341 u8 maxleds = data->chip_info->num_leds;
233 342
234 for (i = 0; i < 2; i++) { 343 for (i = 0; i < 2; i++) {
235 data->pwm[i] = pdata->pwm[i]; 344 data->pwm[i] = pdata->pwm[i];
236 data->psc[i] = pdata->psc[i]; 345 data->psc[i] = pdata->psc[i];
237 i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(i), 346 i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(maxleds, i),
238 data->pwm[i]); 347 data->pwm[i]);
239 i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(i), 348 i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(maxleds, i),
240 data->psc[i]); 349 data->psc[i]);
241 } 350 }
242 351
243 for (i = 0; i < 16; i++) { 352 for (i = 0; i < data->chip_info->num_leds; i++) {
244 struct pca9532_led *led = &data->leds[i]; 353 struct pca9532_led *led = &data->leds[i];
245 struct pca9532_led *pled = &pdata->leds[i]; 354 struct pca9532_led *pled = &pdata->leds[i];
246 led->client = client; 355 led->client = client;
@@ -249,6 +358,9 @@ static int pca9532_configure(struct i2c_client *client,
249 switch (led->type) { 358 switch (led->type) {
250 case PCA9532_TYPE_NONE: 359 case PCA9532_TYPE_NONE:
251 break; 360 break;
361 case PCA9532_TYPE_GPIO:
362 gpios++;
363 break;
252 case PCA9532_TYPE_LED: 364 case PCA9532_TYPE_LED:
253 led->state = pled->state; 365 led->state = pled->state;
254 led->name = pled->name; 366 led->name = pled->name;
@@ -297,6 +409,34 @@ static int pca9532_configure(struct i2c_client *client,
297 break; 409 break;
298 } 410 }
299 } 411 }
412
413#ifdef CONFIG_LEDS_PCA9532_GPIO
414 if (gpios) {
415 data->gpio.label = "gpio-pca9532";
416 data->gpio.direction_input = pca9532_gpio_direction_input;
417 data->gpio.direction_output = pca9532_gpio_direction_output;
418 data->gpio.set = pca9532_gpio_set_value;
419 data->gpio.get = pca9532_gpio_get_value;
420 data->gpio.request = pca9532_gpio_request_pin;
421 data->gpio.can_sleep = 1;
422 data->gpio.base = pdata->gpio_base;
423 data->gpio.ngpio = data->chip_info->num_leds;
424 data->gpio.dev = &client->dev;
425 data->gpio.owner = THIS_MODULE;
426
427 err = gpiochip_add(&data->gpio);
428 if (err) {
429 /* Use data->gpio.dev as a flag for freeing gpiochip */
430 data->gpio.dev = NULL;
431 dev_warn(&client->dev, "could not add gpiochip\n");
432 } else {
433 dev_info(&client->dev, "gpios %i...%i\n",
434 data->gpio.base, data->gpio.base +
435 data->gpio.ngpio - 1);
436 }
437 }
438#endif
439
300 return 0; 440 return 0;
301 441
302exit: 442exit:
@@ -322,6 +462,8 @@ static int pca9532_probe(struct i2c_client *client,
322 if (!data) 462 if (!data)
323 return -ENOMEM; 463 return -ENOMEM;
324 464
465 data->chip_info = &pca9532_chip_info_tbl[id->driver_data];
466
325 dev_info(&client->dev, "setting platform data\n"); 467 dev_info(&client->dev, "setting platform data\n");
326 i2c_set_clientdata(client, data); 468 i2c_set_clientdata(client, data);
327 data->client = client; 469 data->client = client;
@@ -337,7 +479,12 @@ static int pca9532_probe(struct i2c_client *client,
337static int pca9532_remove(struct i2c_client *client) 479static int pca9532_remove(struct i2c_client *client)
338{ 480{
339 struct pca9532_data *data = i2c_get_clientdata(client); 481 struct pca9532_data *data = i2c_get_clientdata(client);
340 pca9532_destroy_devices(data, 16); 482 int err;
483
484 err = pca9532_destroy_devices(data, data->chip_info->num_leds);
485 if (err)
486 return err;
487
341 kfree(data); 488 kfree(data);
342 return 0; 489 return 0;
343} 490}
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index 2dd8ecbfdc31..e77c7f8dcdd4 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -40,10 +40,17 @@ void led_trigger_set_default(struct led_classdev *led_cdev);
40void led_trigger_set(struct led_classdev *led_cdev, 40void led_trigger_set(struct led_classdev *led_cdev,
41 struct led_trigger *trigger); 41 struct led_trigger *trigger);
42void led_trigger_remove(struct led_classdev *led_cdev); 42void led_trigger_remove(struct led_classdev *led_cdev);
43
44static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
45{
46 return led_cdev->trigger_data;
47}
48
43#else 49#else
44#define led_trigger_set_default(x) do {} while (0) 50#define led_trigger_set_default(x) do {} while (0)
45#define led_trigger_set(x, y) do {} while (0) 51#define led_trigger_set(x, y) do {} while (0)
46#define led_trigger_remove(x) do {} while (0) 52#define led_trigger_remove(x) do {} while (0)
53#define led_get_trigger_data(x) (NULL)
47#endif 54#endif
48 55
49ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr, 56ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index b09bcbeade9c..d87c9d02f786 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -91,6 +91,9 @@ static void timer_trig_activate(struct led_classdev *led_cdev)
91 if (rc) 91 if (rc)
92 goto err_out_delayon; 92 goto err_out_delayon;
93 93
94 led_blink_set(led_cdev, &led_cdev->blink_delay_on,
95 &led_cdev->blink_delay_off);
96
94 led_cdev->trigger_data = (void *)1; 97 led_cdev->trigger_data = (void *)1;
95 98
96 return; 99 return;
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index d4fe7bc92a1d..4ada9be1d430 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -47,7 +47,7 @@
47#include <plat/dma.h> 47#include <plat/dma.h>
48#include <plat/vram.h> 48#include <plat/vram.h>
49#include <plat/vrfb.h> 49#include <plat/vrfb.h>
50#include <plat/display.h> 50#include <video/omapdss.h>
51 51
52#include "omap_voutlib.h" 52#include "omap_voutlib.h"
53#include "omap_voutdef.h" 53#include "omap_voutdef.h"
diff --git a/drivers/media/video/omap/omap_voutdef.h b/drivers/media/video/omap/omap_voutdef.h
index ea3a047f8bca..659497b84996 100644
--- a/drivers/media/video/omap/omap_voutdef.h
+++ b/drivers/media/video/omap/omap_voutdef.h
@@ -11,7 +11,7 @@
11#ifndef OMAP_VOUTDEF_H 11#ifndef OMAP_VOUTDEF_H
12#define OMAP_VOUTDEF_H 12#define OMAP_VOUTDEF_H
13 13
14#include <plat/display.h> 14#include <video/omapdss.h>
15 15
16#define YUYV_BPP 2 16#define YUYV_BPP 2
17#define RGB565_BPP 2 17#define RGB565_BPP 2
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 3ed3ff06be5d..481770ab2716 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -538,7 +538,7 @@ config AB8500_CORE
538 538
539config AB8500_I2C_CORE 539config AB8500_I2C_CORE
540 bool "AB8500 register access via PRCMU I2C" 540 bool "AB8500 register access via PRCMU I2C"
541 depends on AB8500_CORE && UX500_SOC_DB8500 541 depends on AB8500_CORE && MFD_DB8500_PRCMU
542 default y 542 default y
543 help 543 help
544 This enables register access to the AB8500 chip via PRCMU I2C. 544 This enables register access to the AB8500 chip via PRCMU I2C.
@@ -575,6 +575,26 @@ config AB3550_CORE
575 LEDs, vibrator, system power and temperature, power management 575 LEDs, vibrator, system power and temperature, power management
576 and ALSA sound. 576 and ALSA sound.
577 577
578config MFD_DB8500_PRCMU
579 bool "ST-Ericsson DB8500 Power Reset Control Management Unit"
580 depends on UX500_SOC_DB8500
581 select MFD_CORE
582 help
583 Select this option to enable support for the DB8500 Power Reset
584 and Control Management Unit. This is basically an autonomous
585 system controller running an XP70 microprocessor, which is accessed
586 through a register map.
587
588config MFD_DB5500_PRCMU
589 bool "ST-Ericsson DB5500 Power Reset Control Management Unit"
590 depends on UX500_SOC_DB5500
591 select MFD_CORE
592 help
593 Select this option to enable support for the DB5500 Power Reset
594 and Control Management Unit. This is basically an autonomous
595 system controller running an XP70 microprocessor, which is accessed
596 through a register map.
597
578config MFD_CS5535 598config MFD_CS5535
579 tristate "Support for CS5535 and CS5536 southbridge core functions" 599 tristate "Support for CS5535 and CS5536 southbridge core functions"
580 select MFD_CORE 600 select MFD_CORE
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 419caa9d7dcf..24aa44448daf 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -74,9 +74,12 @@ obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
74obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o 74obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
75obj-$(CONFIG_AB3550_CORE) += ab3550-core.o 75obj-$(CONFIG_AB3550_CORE) += ab3550-core.o
76obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o 76obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
77obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o
78obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o 77obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o
79obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o 78obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
79obj-$(CONFIG_MFD_DB8500_PRCMU) += db8500-prcmu.o
80# ab8500-i2c need to come after db8500-prcmu (which provides the channel)
81obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o
82obj-$(CONFIG_MFD_DB5500_PRCMU) += db5500-prcmu.o
80obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o 83obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
81obj-$(CONFIG_PMIC_ADP5520) += adp5520.o 84obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
82obj-$(CONFIG_LPC_SCH) += lpc_sch.o 85obj-$(CONFIG_LPC_SCH) += lpc_sch.o
diff --git a/drivers/mfd/ab8500-i2c.c b/drivers/mfd/ab8500-i2c.c
index 821e6b86afd2..9be541c6b004 100644
--- a/drivers/mfd/ab8500-i2c.c
+++ b/drivers/mfd/ab8500-i2c.c
@@ -11,8 +11,7 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/platform_device.h> 12#include <linux/platform_device.h>
13#include <linux/mfd/ab8500.h> 13#include <linux/mfd/ab8500.h>
14 14#include <linux/mfd/db8500-prcmu.h>
15#include <mach/prcmu.h>
16 15
17static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data) 16static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
18{ 17{
diff --git a/arch/arm/mach-ux500/include/mach/prcmu-regs.h b/drivers/mfd/db5500-prcmu-regs.h
index 455467e88791..9a8e9e4ddd33 100644
--- a/arch/arm/mach-ux500/include/mach/prcmu-regs.h
+++ b/drivers/mfd/db5500-prcmu-regs.h
@@ -15,11 +15,20 @@
15 15
16#include <mach/hardware.h> 16#include <mach/hardware.h>
17 17
18#define _PRCMU_BASE IO_ADDRESS(U8500_PRCMU_BASE)
19
20#define PRCM_ARM_PLLDIVPS (_PRCMU_BASE + 0x118) 18#define PRCM_ARM_PLLDIVPS (_PRCMU_BASE + 0x118)
19#define PRCM_ARM_PLLDIVPS_ARM_BRM_RATE 0x3f
20#define PRCM_ARM_PLLDIVPS_MAX_MASK 0xf
21
22#define PRCM_PLLARM_LOCKP (_PRCMU_BASE + 0x0a8)
23#define PRCM_PLLARM_LOCKP_PRCM_PLLARM_LOCKP3 0x2
24
21#define PRCM_ARM_CHGCLKREQ (_PRCMU_BASE + 0x114) 25#define PRCM_ARM_CHGCLKREQ (_PRCMU_BASE + 0x114)
26#define PRCM_ARM_CHGCLKREQ_PRCM_ARM_CHGCLKREQ 0x1
27
22#define PRCM_PLLARM_ENABLE (_PRCMU_BASE + 0x98) 28#define PRCM_PLLARM_ENABLE (_PRCMU_BASE + 0x98)
29#define PRCM_PLLARM_ENABLE_PRCM_PLLARM_ENABLE 0x1
30#define PRCM_PLLARM_ENABLE_PRCM_PLLARM_COUNTON 0x100
31
23#define PRCM_ARMCLKFIX_MGT (_PRCMU_BASE + 0x0) 32#define PRCM_ARMCLKFIX_MGT (_PRCMU_BASE + 0x0)
24#define PRCM_A9_RESETN_CLR (_PRCMU_BASE + 0x1f4) 33#define PRCM_A9_RESETN_CLR (_PRCMU_BASE + 0x1f4)
25#define PRCM_A9_RESETN_SET (_PRCMU_BASE + 0x1f0) 34#define PRCM_A9_RESETN_SET (_PRCMU_BASE + 0x1f0)
@@ -28,7 +37,8 @@
28 37
29/* ARM WFI Standby signal register */ 38/* ARM WFI Standby signal register */
30#define PRCM_ARM_WFI_STANDBY (_PRCMU_BASE + 0x130) 39#define PRCM_ARM_WFI_STANDBY (_PRCMU_BASE + 0x130)
31#define PRCMU_IOCR (_PRCMU_BASE + 0x310) 40#define PRCM_IOCR (_PRCMU_BASE + 0x310)
41#define PRCM_IOCR_IOFORCE 0x1
32 42
33/* CPU mailbox registers */ 43/* CPU mailbox registers */
34#define PRCM_MBOX_CPU_VAL (_PRCMU_BASE + 0x0fc) 44#define PRCM_MBOX_CPU_VAL (_PRCMU_BASE + 0x0fc)
@@ -37,6 +47,8 @@
37 47
38/* Dual A9 core interrupt management unit registers */ 48/* Dual A9 core interrupt management unit registers */
39#define PRCM_A9_MASK_REQ (_PRCMU_BASE + 0x328) 49#define PRCM_A9_MASK_REQ (_PRCMU_BASE + 0x328)
50#define PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ 0x1
51
40#define PRCM_A9_MASK_ACK (_PRCMU_BASE + 0x32c) 52#define PRCM_A9_MASK_ACK (_PRCMU_BASE + 0x32c)
41#define PRCM_ARMITMSK31TO0 (_PRCMU_BASE + 0x11c) 53#define PRCM_ARMITMSK31TO0 (_PRCMU_BASE + 0x11c)
42#define PRCM_ARMITMSK63TO32 (_PRCMU_BASE + 0x120) 54#define PRCM_ARMITMSK63TO32 (_PRCMU_BASE + 0x120)
@@ -74,14 +86,17 @@
74/* PRCMU clock/PLL/reset registers */ 86/* PRCMU clock/PLL/reset registers */
75#define PRCM_PLLDSI_FREQ (_PRCMU_BASE + 0x500) 87#define PRCM_PLLDSI_FREQ (_PRCMU_BASE + 0x500)
76#define PRCM_PLLDSI_ENABLE (_PRCMU_BASE + 0x504) 88#define PRCM_PLLDSI_ENABLE (_PRCMU_BASE + 0x504)
89#define PRCM_PLLDSI_LOCKP (_PRCMU_BASE + 0x508)
77#define PRCM_LCDCLK_MGT (_PRCMU_BASE + 0x044) 90#define PRCM_LCDCLK_MGT (_PRCMU_BASE + 0x044)
78#define PRCM_MCDECLK_MGT (_PRCMU_BASE + 0x064) 91#define PRCM_MCDECLK_MGT (_PRCMU_BASE + 0x064)
79#define PRCM_HDMICLK_MGT (_PRCMU_BASE + 0x058) 92#define PRCM_HDMICLK_MGT (_PRCMU_BASE + 0x058)
80#define PRCM_TVCLK_MGT (_PRCMU_BASE + 0x07c) 93#define PRCM_TVCLK_MGT (_PRCMU_BASE + 0x07c)
81#define PRCM_DSI_PLLOUT_SEL (_PRCMU_BASE + 0x530) 94#define PRCM_DSI_PLLOUT_SEL (_PRCMU_BASE + 0x530)
82#define PRCM_DSITVCLK_DIV (_PRCMU_BASE + 0x52C) 95#define PRCM_DSITVCLK_DIV (_PRCMU_BASE + 0x52C)
96#define PRCM_PLLDSI_LOCKP (_PRCMU_BASE + 0x508)
83#define PRCM_APE_RESETN_SET (_PRCMU_BASE + 0x1E4) 97#define PRCM_APE_RESETN_SET (_PRCMU_BASE + 0x1E4)
84#define PRCM_APE_RESETN_CLR (_PRCMU_BASE + 0x1E8) 98#define PRCM_APE_RESETN_CLR (_PRCMU_BASE + 0x1E8)
99#define PRCM_CLKOCR (_PRCMU_BASE + 0x1CC)
85 100
86/* ePOD and memory power signal control registers */ 101/* ePOD and memory power signal control registers */
87#define PRCM_EPOD_C_SET (_PRCMU_BASE + 0x410) 102#define PRCM_EPOD_C_SET (_PRCMU_BASE + 0x410)
@@ -92,5 +107,9 @@
92 107
93/* Miscellaneous unit registers */ 108/* Miscellaneous unit registers */
94#define PRCM_DSI_SW_RESET (_PRCMU_BASE + 0x324) 109#define PRCM_DSI_SW_RESET (_PRCMU_BASE + 0x324)
110#define PRCM_GPIOCR (_PRCMU_BASE + 0x138)
111#define PRCM_GPIOCR_DBG_STM_MOD_CMD1 0x800
112#define PRCM_GPIOCR_DBG_UARTMOD_CMD0 0x1
113
95 114
96#endif /* __MACH_PRCMU_REGS_H */ 115#endif /* __MACH_PRCMU__REGS_H */
diff --git a/drivers/mfd/db5500-prcmu.c b/drivers/mfd/db5500-prcmu.c
new file mode 100644
index 000000000000..9dbb3cab4a6f
--- /dev/null
+++ b/drivers/mfd/db5500-prcmu.c
@@ -0,0 +1,448 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License Terms: GNU General Public License v2
5 * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
6 *
7 * U5500 PRCM Unit interface driver
8 */
9#include <linux/module.h>
10#include <linux/kernel.h>
11#include <linux/delay.h>
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/spinlock.h>
15#include <linux/io.h>
16#include <linux/slab.h>
17#include <linux/mutex.h>
18#include <linux/completion.h>
19#include <linux/irq.h>
20#include <linux/jiffies.h>
21#include <linux/bitops.h>
22#include <linux/interrupt.h>
23#include <linux/mfd/db5500-prcmu.h>
24#include <mach/hardware.h>
25#include <mach/irqs.h>
26#include <mach/db5500-regs.h>
27#include "db5500-prcmu-regs.h"
28
29#define _PRCM_MB_HEADER (tcdm_base + 0xFE8)
30#define PRCM_REQ_MB0_HEADER (_PRCM_MB_HEADER + 0x0)
31#define PRCM_REQ_MB1_HEADER (_PRCM_MB_HEADER + 0x1)
32#define PRCM_REQ_MB2_HEADER (_PRCM_MB_HEADER + 0x2)
33#define PRCM_REQ_MB3_HEADER (_PRCM_MB_HEADER + 0x3)
34#define PRCM_REQ_MB4_HEADER (_PRCM_MB_HEADER + 0x4)
35#define PRCM_REQ_MB5_HEADER (_PRCM_MB_HEADER + 0x5)
36#define PRCM_REQ_MB6_HEADER (_PRCM_MB_HEADER + 0x6)
37#define PRCM_REQ_MB7_HEADER (_PRCM_MB_HEADER + 0x7)
38#define PRCM_ACK_MB0_HEADER (_PRCM_MB_HEADER + 0x8)
39#define PRCM_ACK_MB1_HEADER (_PRCM_MB_HEADER + 0x9)
40#define PRCM_ACK_MB2_HEADER (_PRCM_MB_HEADER + 0xa)
41#define PRCM_ACK_MB3_HEADER (_PRCM_MB_HEADER + 0xb)
42#define PRCM_ACK_MB4_HEADER (_PRCM_MB_HEADER + 0xc)
43#define PRCM_ACK_MB5_HEADER (_PRCM_MB_HEADER + 0xd)
44#define PRCM_ACK_MB6_HEADER (_PRCM_MB_HEADER + 0xe)
45#define PRCM_ACK_MB7_HEADER (_PRCM_MB_HEADER + 0xf)
46
47/* Req Mailboxes */
48#define PRCM_REQ_MB0 (tcdm_base + 0xFD8)
49#define PRCM_REQ_MB1 (tcdm_base + 0xFCC)
50#define PRCM_REQ_MB2 (tcdm_base + 0xFC4)
51#define PRCM_REQ_MB3 (tcdm_base + 0xFC0)
52#define PRCM_REQ_MB4 (tcdm_base + 0xF98)
53#define PRCM_REQ_MB5 (tcdm_base + 0xF90)
54#define PRCM_REQ_MB6 (tcdm_base + 0xF8C)
55#define PRCM_REQ_MB7 (tcdm_base + 0xF84)
56
57/* Ack Mailboxes */
58#define PRCM_ACK_MB0 (tcdm_base + 0xF38)
59#define PRCM_ACK_MB1 (tcdm_base + 0xF30)
60#define PRCM_ACK_MB2 (tcdm_base + 0xF24)
61#define PRCM_ACK_MB3 (tcdm_base + 0xF20)
62#define PRCM_ACK_MB4 (tcdm_base + 0xF1C)
63#define PRCM_ACK_MB5 (tcdm_base + 0xF14)
64#define PRCM_ACK_MB6 (tcdm_base + 0xF0C)
65#define PRCM_ACK_MB7 (tcdm_base + 0xF08)
66
67enum mb_return_code {
68 RC_SUCCESS,
69 RC_FAIL,
70};
71
72/* Mailbox 0 headers. */
73enum mb0_header {
74 /* request */
75 RMB0H_PWR_STATE_TRANS = 1,
76 RMB0H_WAKE_UP_CFG,
77 RMB0H_RD_WAKE_UP_ACK,
78 /* acknowledge */
79 AMB0H_WAKE_UP = 1,
80};
81
82/* Mailbox 5 headers. */
83enum mb5_header {
84 MB5H_I2C_WRITE = 1,
85 MB5H_I2C_READ,
86};
87
88/* Request mailbox 5 fields. */
89#define PRCM_REQ_MB5_I2C_SLAVE (PRCM_REQ_MB5 + 0)
90#define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 1)
91#define PRCM_REQ_MB5_I2C_SIZE (PRCM_REQ_MB5 + 2)
92#define PRCM_REQ_MB5_I2C_DATA (PRCM_REQ_MB5 + 4)
93
94/* Acknowledge mailbox 5 fields. */
95#define PRCM_ACK_MB5_RETURN_CODE (PRCM_ACK_MB5 + 0)
96#define PRCM_ACK_MB5_I2C_DATA (PRCM_ACK_MB5 + 4)
97
98#define NUM_MB 8
99#define MBOX_BIT BIT
100#define ALL_MBOX_BITS (MBOX_BIT(NUM_MB) - 1)
101
102/*
103* Used by MCDE to setup all necessary PRCMU registers
104*/
105#define PRCMU_RESET_DSIPLL 0x00004000
106#define PRCMU_UNCLAMP_DSIPLL 0x00400800
107
108/* HDMI CLK MGT PLLSW=001 (PLLSOC0), PLLDIV=0x8, = 50 Mhz*/
109#define PRCMU_DSI_CLOCK_SETTING 0x00000128
110/* TVCLK_MGT PLLSW=001 (PLLSOC0) PLLDIV=0x13, = 19.05 MHZ */
111#define PRCMU_DSI_LP_CLOCK_SETTING 0x00000135
112#define PRCMU_PLLDSI_FREQ_SETTING 0x0004013C
113#define PRCMU_DSI_PLLOUT_SEL_SETTING 0x00000002
114#define PRCMU_ENABLE_ESCAPE_CLOCK_DIV 0x03000101
115#define PRCMU_DISABLE_ESCAPE_CLOCK_DIV 0x00000101
116
117#define PRCMU_ENABLE_PLLDSI 0x00000001
118#define PRCMU_DISABLE_PLLDSI 0x00000000
119
120#define PRCMU_DSI_RESET_SW 0x00000003
121
122#define PRCMU_PLLDSI_LOCKP_LOCKED 0x3
123
124/*
125 * mb0_transfer - state needed for mailbox 0 communication.
126 * @lock: The transaction lock.
127 */
128static struct {
129 spinlock_t lock;
130} mb0_transfer;
131
132/*
133 * mb5_transfer - state needed for mailbox 5 communication.
134 * @lock: The transaction lock.
135 * @work: The transaction completion structure.
136 * @ack: Reply ("acknowledge") data.
137 */
138static struct {
139 struct mutex lock;
140 struct completion work;
141 struct {
142 u8 header;
143 u8 status;
144 u8 value[4];
145 } ack;
146} mb5_transfer;
147
148/* PRCMU TCDM base IO address. */
149static __iomem void *tcdm_base;
150
151/**
152 * db5500_prcmu_abb_read() - Read register value(s) from the ABB.
153 * @slave: The I2C slave address.
154 * @reg: The (start) register address.
155 * @value: The read out value(s).
156 * @size: The number of registers to read.
157 *
158 * Reads register value(s) from the ABB.
159 * @size has to be <= 4.
160 */
161int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
162{
163 int r;
164
165 if ((size < 1) || (4 < size))
166 return -EINVAL;
167
168 mutex_lock(&mb5_transfer.lock);
169
170 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
171 cpu_relax();
172 writeb(slave, PRCM_REQ_MB5_I2C_SLAVE);
173 writeb(reg, PRCM_REQ_MB5_I2C_REG);
174 writeb(size, PRCM_REQ_MB5_I2C_SIZE);
175 writeb(MB5H_I2C_READ, PRCM_REQ_MB5_HEADER);
176
177 writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
178 wait_for_completion(&mb5_transfer.work);
179
180 r = 0;
181 if ((mb5_transfer.ack.header == MB5H_I2C_READ) &&
182 (mb5_transfer.ack.status == RC_SUCCESS))
183 memcpy(value, mb5_transfer.ack.value, (size_t)size);
184 else
185 r = -EIO;
186
187 mutex_unlock(&mb5_transfer.lock);
188
189 return r;
190}
191
192/**
193 * db5500_prcmu_abb_write() - Write register value(s) to the ABB.
194 * @slave: The I2C slave address.
195 * @reg: The (start) register address.
196 * @value: The value(s) to write.
197 * @size: The number of registers to write.
198 *
199 * Writes register value(s) to the ABB.
200 * @size has to be <= 4.
201 */
202int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
203{
204 int r;
205
206 if ((size < 1) || (4 < size))
207 return -EINVAL;
208
209 mutex_lock(&mb5_transfer.lock);
210
211 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
212 cpu_relax();
213 writeb(slave, PRCM_REQ_MB5_I2C_SLAVE);
214 writeb(reg, PRCM_REQ_MB5_I2C_REG);
215 writeb(size, PRCM_REQ_MB5_I2C_SIZE);
216 memcpy_toio(PRCM_REQ_MB5_I2C_DATA, value, size);
217 writeb(MB5H_I2C_WRITE, PRCM_REQ_MB5_HEADER);
218
219 writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
220 wait_for_completion(&mb5_transfer.work);
221
222 if ((mb5_transfer.ack.header == MB5H_I2C_WRITE) &&
223 (mb5_transfer.ack.status == RC_SUCCESS))
224 r = 0;
225 else
226 r = -EIO;
227
228 mutex_unlock(&mb5_transfer.lock);
229
230 return r;
231}
232
233int db5500_prcmu_enable_dsipll(void)
234{
235 int i;
236
237 /* Enable DSIPLL_RESETN resets */
238 writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_CLR);
239 /* Unclamp DSIPLL in/out */
240 writel(PRCMU_UNCLAMP_DSIPLL, PRCM_MMIP_LS_CLAMP_CLR);
241 /* Set DSI PLL FREQ */
242 writel(PRCMU_PLLDSI_FREQ_SETTING, PRCM_PLLDSI_FREQ);
243 writel(PRCMU_DSI_PLLOUT_SEL_SETTING,
244 PRCM_DSI_PLLOUT_SEL);
245 /* Enable Escape clocks */
246 writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
247
248 /* Start DSI PLL */
249 writel(PRCMU_ENABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
250 /* Reset DSI PLL */
251 writel(PRCMU_DSI_RESET_SW, PRCM_DSI_SW_RESET);
252 for (i = 0; i < 10; i++) {
253 if ((readl(PRCM_PLLDSI_LOCKP) &
254 PRCMU_PLLDSI_LOCKP_LOCKED) == PRCMU_PLLDSI_LOCKP_LOCKED)
255 break;
256 udelay(100);
257 }
258 /* Release DSIPLL_RESETN */
259 writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_SET);
260 return 0;
261}
262
263int db5500_prcmu_disable_dsipll(void)
264{
265 /* Disable dsi pll */
266 writel(PRCMU_DISABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
267 /* Disable escapeclock */
268 writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
269 return 0;
270}
271
272int db5500_prcmu_set_display_clocks(void)
273{
274 /* HDMI and TVCLK Should be handled somewhere else */
275 /* PLLDIV=8, PLLSW=2, CLKEN=1 */
276 writel(PRCMU_DSI_CLOCK_SETTING, PRCM_HDMICLK_MGT);
277 /* PLLDIV=14, PLLSW=2, CLKEN=1 */
278 writel(PRCMU_DSI_LP_CLOCK_SETTING, PRCM_TVCLK_MGT);
279 return 0;
280}
281
282static void ack_dbb_wakeup(void)
283{
284 unsigned long flags;
285
286 spin_lock_irqsave(&mb0_transfer.lock, flags);
287
288 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
289 cpu_relax();
290
291 writeb(RMB0H_RD_WAKE_UP_ACK, PRCM_REQ_MB0_HEADER);
292 writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);
293
294 spin_unlock_irqrestore(&mb0_transfer.lock, flags);
295}
296
297static inline void print_unknown_header_warning(u8 n, u8 header)
298{
299 pr_warning("prcmu: Unknown message header (%d) in mailbox %d.\n",
300 header, n);
301}
302
303static bool read_mailbox_0(void)
304{
305 bool r;
306 u8 header;
307
308 header = readb(PRCM_ACK_MB0_HEADER);
309 switch (header) {
310 case AMB0H_WAKE_UP:
311 r = true;
312 break;
313 default:
314 print_unknown_header_warning(0, header);
315 r = false;
316 break;
317 }
318 writel(MBOX_BIT(0), PRCM_ARM_IT1_CLEAR);
319 return r;
320}
321
322static bool read_mailbox_1(void)
323{
324 writel(MBOX_BIT(1), PRCM_ARM_IT1_CLEAR);
325 return false;
326}
327
328static bool read_mailbox_2(void)
329{
330 writel(MBOX_BIT(2), PRCM_ARM_IT1_CLEAR);
331 return false;
332}
333
334static bool read_mailbox_3(void)
335{
336 writel(MBOX_BIT(3), PRCM_ARM_IT1_CLEAR);
337 return false;
338}
339
340static bool read_mailbox_4(void)
341{
342 writel(MBOX_BIT(4), PRCM_ARM_IT1_CLEAR);
343 return false;
344}
345
346static bool read_mailbox_5(void)
347{
348 u8 header;
349
350 header = readb(PRCM_ACK_MB5_HEADER);
351 switch (header) {
352 case MB5H_I2C_READ:
353 memcpy_fromio(mb5_transfer.ack.value, PRCM_ACK_MB5_I2C_DATA, 4);
354 case MB5H_I2C_WRITE:
355 mb5_transfer.ack.header = header;
356 mb5_transfer.ack.status = readb(PRCM_ACK_MB5_RETURN_CODE);
357 complete(&mb5_transfer.work);
358 break;
359 default:
360 print_unknown_header_warning(5, header);
361 break;
362 }
363 writel(MBOX_BIT(5), PRCM_ARM_IT1_CLEAR);
364 return false;
365}
366
367static bool read_mailbox_6(void)
368{
369 writel(MBOX_BIT(6), PRCM_ARM_IT1_CLEAR);
370 return false;
371}
372
373static bool read_mailbox_7(void)
374{
375 writel(MBOX_BIT(7), PRCM_ARM_IT1_CLEAR);
376 return false;
377}
378
379static bool (* const read_mailbox[NUM_MB])(void) = {
380 read_mailbox_0,
381 read_mailbox_1,
382 read_mailbox_2,
383 read_mailbox_3,
384 read_mailbox_4,
385 read_mailbox_5,
386 read_mailbox_6,
387 read_mailbox_7
388};
389
390static irqreturn_t prcmu_irq_handler(int irq, void *data)
391{
392 u32 bits;
393 u8 n;
394 irqreturn_t r;
395
396 bits = (readl(PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS);
397 if (unlikely(!bits))
398 return IRQ_NONE;
399
400 r = IRQ_HANDLED;
401 for (n = 0; bits; n++) {
402 if (bits & MBOX_BIT(n)) {
403 bits -= MBOX_BIT(n);
404 if (read_mailbox[n]())
405 r = IRQ_WAKE_THREAD;
406 }
407 }
408 return r;
409}
410
411static irqreturn_t prcmu_irq_thread_fn(int irq, void *data)
412{
413 ack_dbb_wakeup();
414 return IRQ_HANDLED;
415}
416
417void __init db5500_prcmu_early_init(void)
418{
419 tcdm_base = __io_address(U5500_PRCMU_TCDM_BASE);
420 spin_lock_init(&mb0_transfer.lock);
421 mutex_init(&mb5_transfer.lock);
422 init_completion(&mb5_transfer.work);
423}
424
425/**
426 * prcmu_fw_init - arch init call for the Linux PRCMU fw init logic
427 *
428 */
429int __init db5500_prcmu_init(void)
430{
431 int r = 0;
432
433 if (ux500_is_svp() || !cpu_is_u5500())
434 return -ENODEV;
435
436 /* Clean up the mailbox interrupts after pre-kernel code. */
437 writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLEAR);
438
439 r = request_threaded_irq(IRQ_DB5500_PRCMU1, prcmu_irq_handler,
440 prcmu_irq_thread_fn, 0, "prcmu", NULL);
441 if (r < 0) {
442 pr_err("prcmu: Failed to allocate IRQ_DB5500_PRCMU1.\n");
443 return -EBUSY;
444 }
445 return 0;
446}
447
448arch_initcall(db5500_prcmu_init);
diff --git a/drivers/mfd/db8500-prcmu-regs.h b/drivers/mfd/db8500-prcmu-regs.h
new file mode 100644
index 000000000000..3bbf04d58043
--- /dev/null
+++ b/drivers/mfd/db8500-prcmu-regs.h
@@ -0,0 +1,166 @@
1/*
2 * Copyright (C) STMicroelectronics 2009
3 * Copyright (C) ST-Ericsson SA 2010
4 *
5 * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
6 * Author: Sundar Iyer <sundar.iyer@stericsson.com>
7 *
8 * License Terms: GNU General Public License v2
9 *
10 * PRCM Unit registers
11 */
12#ifndef __DB8500_PRCMU_REGS_H
13#define __DB8500_PRCMU_REGS_H
14
15#include <linux/bitops.h>
16#include <mach/hardware.h>
17
18#define BITS(_start, _end) ((BIT(_end) - BIT(_start)) + BIT(_end))
19
20#define PRCM_ARM_PLLDIVPS 0x118
21#define PRCM_ARM_PLLDIVPS_ARM_BRM_RATE BITS(0, 5)
22#define PRCM_ARM_PLLDIVPS_MAX_MASK 0xF
23
24#define PRCM_PLLARM_LOCKP 0x0A8
25#define PRCM_PLLARM_LOCKP_PRCM_PLLARM_LOCKP3 BIT(1)
26
27#define PRCM_ARM_CHGCLKREQ 0x114
28#define PRCM_ARM_CHGCLKREQ_PRCM_ARM_CHGCLKREQ BIT(0)
29
30#define PRCM_PLLARM_ENABLE 0x98
31#define PRCM_PLLARM_ENABLE_PRCM_PLLARM_ENABLE BIT(0)
32#define PRCM_PLLARM_ENABLE_PRCM_PLLARM_COUNTON BIT(8)
33
34#define PRCM_ARMCLKFIX_MGT 0x0
35#define PRCM_A9_RESETN_CLR 0x1f4
36#define PRCM_A9_RESETN_SET 0x1f0
37#define PRCM_ARM_LS_CLAMP 0x30C
38#define PRCM_SRAM_A9 0x308
39
40/* ARM WFI Standby signal register */
41#define PRCM_ARM_WFI_STANDBY 0x130
42#define PRCM_IOCR 0x310
43#define PRCM_IOCR_IOFORCE BIT(0)
44
45/* CPU mailbox registers */
46#define PRCM_MBOX_CPU_VAL 0x0FC
47#define PRCM_MBOX_CPU_SET 0x100
48
49/* Dual A9 core interrupt management unit registers */
50#define PRCM_A9_MASK_REQ 0x328
51#define PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ BIT(0)
52
53#define PRCM_A9_MASK_ACK 0x32C
54#define PRCM_ARMITMSK31TO0 0x11C
55#define PRCM_ARMITMSK63TO32 0x120
56#define PRCM_ARMITMSK95TO64 0x124
57#define PRCM_ARMITMSK127TO96 0x128
58#define PRCM_POWER_STATE_VAL 0x25C
59#define PRCM_ARMITVAL31TO0 0x260
60#define PRCM_ARMITVAL63TO32 0x264
61#define PRCM_ARMITVAL95TO64 0x268
62#define PRCM_ARMITVAL127TO96 0x26C
63
64#define PRCM_HOSTACCESS_REQ 0x334
65#define PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ BIT(0)
66
67#define PRCM_ARM_IT1_CLR 0x48C
68#define PRCM_ARM_IT1_VAL 0x494
69
70#define PRCM_ITSTATUS0 0x148
71#define PRCM_ITSTATUS1 0x150
72#define PRCM_ITSTATUS2 0x158
73#define PRCM_ITSTATUS3 0x160
74#define PRCM_ITSTATUS4 0x168
75#define PRCM_ITSTATUS5 0x484
76#define PRCM_ITCLEAR5 0x488
77#define PRCM_ARMIT_MASKXP70_IT 0x1018
78
79/* System reset register */
80#define PRCM_APE_SOFTRST 0x228
81
82/* Level shifter and clamp control registers */
83#define PRCM_MMIP_LS_CLAMP_SET 0x420
84#define PRCM_MMIP_LS_CLAMP_CLR 0x424
85
86/* PRCMU HW semaphore */
87#define PRCM_SEM 0x400
88#define PRCM_SEM_PRCM_SEM BIT(0)
89
90/* PRCMU clock/PLL/reset registers */
91#define PRCM_PLLDSI_FREQ 0x500
92#define PRCM_PLLDSI_ENABLE 0x504
93#define PRCM_PLLDSI_LOCKP 0x508
94#define PRCM_DSI_PLLOUT_SEL 0x530
95#define PRCM_DSITVCLK_DIV 0x52C
96#define PRCM_APE_RESETN_SET 0x1E4
97#define PRCM_APE_RESETN_CLR 0x1E8
98
99#define PRCM_TCR 0x1C8
100#define PRCM_TCR_TENSEL_MASK BITS(0, 7)
101#define PRCM_TCR_STOP_TIMERS BIT(16)
102#define PRCM_TCR_DOZE_MODE BIT(17)
103
104#define PRCM_CLKOCR 0x1CC
105#define PRCM_CLKOCR_CLKODIV0_SHIFT 0
106#define PRCM_CLKOCR_CLKODIV0_MASK BITS(0, 5)
107#define PRCM_CLKOCR_CLKOSEL0_SHIFT 6
108#define PRCM_CLKOCR_CLKOSEL0_MASK BITS(6, 8)
109#define PRCM_CLKOCR_CLKODIV1_SHIFT 16
110#define PRCM_CLKOCR_CLKODIV1_MASK BITS(16, 21)
111#define PRCM_CLKOCR_CLKOSEL1_SHIFT 22
112#define PRCM_CLKOCR_CLKOSEL1_MASK BITS(22, 24)
113#define PRCM_CLKOCR_CLK1TYPE BIT(28)
114
115#define PRCM_SGACLK_MGT 0x014
116#define PRCM_UARTCLK_MGT 0x018
117#define PRCM_MSP02CLK_MGT 0x01C
118#define PRCM_MSP1CLK_MGT 0x288
119#define PRCM_I2CCLK_MGT 0x020
120#define PRCM_SDMMCCLK_MGT 0x024
121#define PRCM_SLIMCLK_MGT 0x028
122#define PRCM_PER1CLK_MGT 0x02C
123#define PRCM_PER2CLK_MGT 0x030
124#define PRCM_PER3CLK_MGT 0x034
125#define PRCM_PER5CLK_MGT 0x038
126#define PRCM_PER6CLK_MGT 0x03C
127#define PRCM_PER7CLK_MGT 0x040
128#define PRCM_LCDCLK_MGT 0x044
129#define PRCM_BMLCLK_MGT 0x04C
130#define PRCM_HSITXCLK_MGT 0x050
131#define PRCM_HSIRXCLK_MGT 0x054
132#define PRCM_HDMICLK_MGT 0x058
133#define PRCM_APEATCLK_MGT 0x05C
134#define PRCM_APETRACECLK_MGT 0x060
135#define PRCM_MCDECLK_MGT 0x064
136#define PRCM_IPI2CCLK_MGT 0x068
137#define PRCM_DSIALTCLK_MGT 0x06C
138#define PRCM_DMACLK_MGT 0x074
139#define PRCM_B2R2CLK_MGT 0x078
140#define PRCM_TVCLK_MGT 0x07C
141#define PRCM_UNIPROCLK_MGT 0x278
142#define PRCM_SSPCLK_MGT 0x280
143#define PRCM_RNGCLK_MGT 0x284
144#define PRCM_UICCCLK_MGT 0x27C
145
146#define PRCM_CLK_MGT_CLKPLLDIV_MASK BITS(0, 4)
147#define PRCM_CLK_MGT_CLKPLLSW_MASK BITS(5, 7)
148#define PRCM_CLK_MGT_CLKEN BIT(8)
149
150/* ePOD and memory power signal control registers */
151#define PRCM_EPOD_C_SET 0x410
152#define PRCM_SRAM_LS_SLEEP 0x304
153
154/* Debug power control unit registers */
155#define PRCM_POWER_STATE_SET 0x254
156
157/* Miscellaneous unit registers */
158#define PRCM_DSI_SW_RESET 0x324
159#define PRCM_GPIOCR 0x138
160
161/* GPIOCR register */
162#define PRCM_GPIOCR_SPI2_SELECT BIT(23)
163
164#define PRCM_DDR_SUBSYS_APE_MINBW 0x438
165
166#endif /* __DB8500_PRCMU_REGS_H */
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
new file mode 100644
index 000000000000..e63782107e2f
--- /dev/null
+++ b/drivers/mfd/db8500-prcmu.c
@@ -0,0 +1,2069 @@
1/*
2 * Copyright (C) STMicroelectronics 2009
3 * Copyright (C) ST-Ericsson SA 2010
4 *
5 * License Terms: GNU General Public License v2
6 * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
7 * Author: Sundar Iyer <sundar.iyer@stericsson.com>
8 * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
9 *
10 * U8500 PRCM Unit interface driver
11 *
12 */
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/delay.h>
16#include <linux/errno.h>
17#include <linux/err.h>
18#include <linux/spinlock.h>
19#include <linux/io.h>
20#include <linux/slab.h>
21#include <linux/mutex.h>
22#include <linux/completion.h>
23#include <linux/irq.h>
24#include <linux/jiffies.h>
25#include <linux/bitops.h>
26#include <linux/fs.h>
27#include <linux/platform_device.h>
28#include <linux/uaccess.h>
29#include <linux/mfd/core.h>
30#include <linux/mfd/db8500-prcmu.h>
31#include <linux/regulator/db8500-prcmu.h>
32#include <linux/regulator/machine.h>
33#include <mach/hardware.h>
34#include <mach/irqs.h>
35#include <mach/db8500-regs.h>
36#include <mach/id.h>
37#include "db8500-prcmu-regs.h"
38
39/* Offset for the firmware version within the TCPM */
40#define PRCMU_FW_VERSION_OFFSET 0xA4
41
42/* PRCMU project numbers, defined by PRCMU FW */
43#define PRCMU_PROJECT_ID_8500V1_0 1
44#define PRCMU_PROJECT_ID_8500V2_0 2
45#define PRCMU_PROJECT_ID_8400V2_0 3
46
47/* Index of different voltages to be used when accessing AVSData */
48#define PRCM_AVS_BASE 0x2FC
49#define PRCM_AVS_VBB_RET (PRCM_AVS_BASE + 0x0)
50#define PRCM_AVS_VBB_MAX_OPP (PRCM_AVS_BASE + 0x1)
51#define PRCM_AVS_VBB_100_OPP (PRCM_AVS_BASE + 0x2)
52#define PRCM_AVS_VBB_50_OPP (PRCM_AVS_BASE + 0x3)
53#define PRCM_AVS_VARM_MAX_OPP (PRCM_AVS_BASE + 0x4)
54#define PRCM_AVS_VARM_100_OPP (PRCM_AVS_BASE + 0x5)
55#define PRCM_AVS_VARM_50_OPP (PRCM_AVS_BASE + 0x6)
56#define PRCM_AVS_VARM_RET (PRCM_AVS_BASE + 0x7)
57#define PRCM_AVS_VAPE_100_OPP (PRCM_AVS_BASE + 0x8)
58#define PRCM_AVS_VAPE_50_OPP (PRCM_AVS_BASE + 0x9)
59#define PRCM_AVS_VMOD_100_OPP (PRCM_AVS_BASE + 0xA)
60#define PRCM_AVS_VMOD_50_OPP (PRCM_AVS_BASE + 0xB)
61#define PRCM_AVS_VSAFE (PRCM_AVS_BASE + 0xC)
62
63#define PRCM_AVS_VOLTAGE 0
64#define PRCM_AVS_VOLTAGE_MASK 0x3f
65#define PRCM_AVS_ISSLOWSTARTUP 6
66#define PRCM_AVS_ISSLOWSTARTUP_MASK (1 << PRCM_AVS_ISSLOWSTARTUP)
67#define PRCM_AVS_ISMODEENABLE 7
68#define PRCM_AVS_ISMODEENABLE_MASK (1 << PRCM_AVS_ISMODEENABLE)
69
70#define PRCM_BOOT_STATUS 0xFFF
71#define PRCM_ROMCODE_A2P 0xFFE
72#define PRCM_ROMCODE_P2A 0xFFD
73#define PRCM_XP70_CUR_PWR_STATE 0xFFC /* 4 BYTES */
74
75#define PRCM_SW_RST_REASON 0xFF8 /* 2 bytes */
76
77#define _PRCM_MBOX_HEADER 0xFE8 /* 16 bytes */
78#define PRCM_MBOX_HEADER_REQ_MB0 (_PRCM_MBOX_HEADER + 0x0)
79#define PRCM_MBOX_HEADER_REQ_MB1 (_PRCM_MBOX_HEADER + 0x1)
80#define PRCM_MBOX_HEADER_REQ_MB2 (_PRCM_MBOX_HEADER + 0x2)
81#define PRCM_MBOX_HEADER_REQ_MB3 (_PRCM_MBOX_HEADER + 0x3)
82#define PRCM_MBOX_HEADER_REQ_MB4 (_PRCM_MBOX_HEADER + 0x4)
83#define PRCM_MBOX_HEADER_REQ_MB5 (_PRCM_MBOX_HEADER + 0x5)
84#define PRCM_MBOX_HEADER_ACK_MB0 (_PRCM_MBOX_HEADER + 0x8)
85
86/* Req Mailboxes */
87#define PRCM_REQ_MB0 0xFDC /* 12 bytes */
88#define PRCM_REQ_MB1 0xFD0 /* 12 bytes */
89#define PRCM_REQ_MB2 0xFC0 /* 16 bytes */
90#define PRCM_REQ_MB3 0xE4C /* 372 bytes */
91#define PRCM_REQ_MB4 0xE48 /* 4 bytes */
92#define PRCM_REQ_MB5 0xE44 /* 4 bytes */
93
94/* Ack Mailboxes */
95#define PRCM_ACK_MB0 0xE08 /* 52 bytes */
96#define PRCM_ACK_MB1 0xE04 /* 4 bytes */
97#define PRCM_ACK_MB2 0xE00 /* 4 bytes */
98#define PRCM_ACK_MB3 0xDFC /* 4 bytes */
99#define PRCM_ACK_MB4 0xDF8 /* 4 bytes */
100#define PRCM_ACK_MB5 0xDF4 /* 4 bytes */
101
102/* Mailbox 0 headers */
103#define MB0H_POWER_STATE_TRANS 0
104#define MB0H_CONFIG_WAKEUPS_EXE 1
105#define MB0H_READ_WAKEUP_ACK 3
106#define MB0H_CONFIG_WAKEUPS_SLEEP 4
107
108#define MB0H_WAKEUP_EXE 2
109#define MB0H_WAKEUP_SLEEP 5
110
111/* Mailbox 0 REQs */
112#define PRCM_REQ_MB0_AP_POWER_STATE (PRCM_REQ_MB0 + 0x0)
113#define PRCM_REQ_MB0_AP_PLL_STATE (PRCM_REQ_MB0 + 0x1)
114#define PRCM_REQ_MB0_ULP_CLOCK_STATE (PRCM_REQ_MB0 + 0x2)
115#define PRCM_REQ_MB0_DO_NOT_WFI (PRCM_REQ_MB0 + 0x3)
116#define PRCM_REQ_MB0_WAKEUP_8500 (PRCM_REQ_MB0 + 0x4)
117#define PRCM_REQ_MB0_WAKEUP_4500 (PRCM_REQ_MB0 + 0x8)
118
119/* Mailbox 0 ACKs */
120#define PRCM_ACK_MB0_AP_PWRSTTR_STATUS (PRCM_ACK_MB0 + 0x0)
121#define PRCM_ACK_MB0_READ_POINTER (PRCM_ACK_MB0 + 0x1)
122#define PRCM_ACK_MB0_WAKEUP_0_8500 (PRCM_ACK_MB0 + 0x4)
123#define PRCM_ACK_MB0_WAKEUP_0_4500 (PRCM_ACK_MB0 + 0x8)
124#define PRCM_ACK_MB0_WAKEUP_1_8500 (PRCM_ACK_MB0 + 0x1C)
125#define PRCM_ACK_MB0_WAKEUP_1_4500 (PRCM_ACK_MB0 + 0x20)
126#define PRCM_ACK_MB0_EVENT_4500_NUMBERS 20
127
128/* Mailbox 1 headers */
129#define MB1H_ARM_APE_OPP 0x0
130#define MB1H_RESET_MODEM 0x2
131#define MB1H_REQUEST_APE_OPP_100_VOLT 0x3
132#define MB1H_RELEASE_APE_OPP_100_VOLT 0x4
133#define MB1H_RELEASE_USB_WAKEUP 0x5
134
135/* Mailbox 1 Requests */
136#define PRCM_REQ_MB1_ARM_OPP (PRCM_REQ_MB1 + 0x0)
137#define PRCM_REQ_MB1_APE_OPP (PRCM_REQ_MB1 + 0x1)
138#define PRCM_REQ_MB1_APE_OPP_100_RESTORE (PRCM_REQ_MB1 + 0x4)
139#define PRCM_REQ_MB1_ARM_OPP_100_RESTORE (PRCM_REQ_MB1 + 0x8)
140
141/* Mailbox 1 ACKs */
142#define PRCM_ACK_MB1_CURRENT_ARM_OPP (PRCM_ACK_MB1 + 0x0)
143#define PRCM_ACK_MB1_CURRENT_APE_OPP (PRCM_ACK_MB1 + 0x1)
144#define PRCM_ACK_MB1_APE_VOLTAGE_STATUS (PRCM_ACK_MB1 + 0x2)
145#define PRCM_ACK_MB1_DVFS_STATUS (PRCM_ACK_MB1 + 0x3)
146
147/* Mailbox 2 headers */
148#define MB2H_DPS 0x0
149#define MB2H_AUTO_PWR 0x1
150
151/* Mailbox 2 REQs */
152#define PRCM_REQ_MB2_SVA_MMDSP (PRCM_REQ_MB2 + 0x0)
153#define PRCM_REQ_MB2_SVA_PIPE (PRCM_REQ_MB2 + 0x1)
154#define PRCM_REQ_MB2_SIA_MMDSP (PRCM_REQ_MB2 + 0x2)
155#define PRCM_REQ_MB2_SIA_PIPE (PRCM_REQ_MB2 + 0x3)
156#define PRCM_REQ_MB2_SGA (PRCM_REQ_MB2 + 0x4)
157#define PRCM_REQ_MB2_B2R2_MCDE (PRCM_REQ_MB2 + 0x5)
158#define PRCM_REQ_MB2_ESRAM12 (PRCM_REQ_MB2 + 0x6)
159#define PRCM_REQ_MB2_ESRAM34 (PRCM_REQ_MB2 + 0x7)
160#define PRCM_REQ_MB2_AUTO_PM_SLEEP (PRCM_REQ_MB2 + 0x8)
161#define PRCM_REQ_MB2_AUTO_PM_IDLE (PRCM_REQ_MB2 + 0xC)
162
163/* Mailbox 2 ACKs */
164#define PRCM_ACK_MB2_DPS_STATUS (PRCM_ACK_MB2 + 0x0)
165#define HWACC_PWR_ST_OK 0xFE
166
167/* Mailbox 3 headers */
168#define MB3H_ANC 0x0
169#define MB3H_SIDETONE 0x1
170#define MB3H_SYSCLK 0xE
171
172/* Mailbox 3 Requests */
173#define PRCM_REQ_MB3_ANC_FIR_COEFF (PRCM_REQ_MB3 + 0x0)
174#define PRCM_REQ_MB3_ANC_IIR_COEFF (PRCM_REQ_MB3 + 0x20)
175#define PRCM_REQ_MB3_ANC_SHIFTER (PRCM_REQ_MB3 + 0x60)
176#define PRCM_REQ_MB3_ANC_WARP (PRCM_REQ_MB3 + 0x64)
177#define PRCM_REQ_MB3_SIDETONE_FIR_GAIN (PRCM_REQ_MB3 + 0x68)
178#define PRCM_REQ_MB3_SIDETONE_FIR_COEFF (PRCM_REQ_MB3 + 0x6C)
179#define PRCM_REQ_MB3_SYSCLK_MGT (PRCM_REQ_MB3 + 0x16C)
180
181/* Mailbox 4 headers */
182#define MB4H_DDR_INIT 0x0
183#define MB4H_MEM_ST 0x1
184#define MB4H_HOTDOG 0x12
185#define MB4H_HOTMON 0x13
186#define MB4H_HOT_PERIOD 0x14
187
188/* Mailbox 4 Requests */
189#define PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE (PRCM_REQ_MB4 + 0x0)
190#define PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE (PRCM_REQ_MB4 + 0x1)
191#define PRCM_REQ_MB4_ESRAM0_ST (PRCM_REQ_MB4 + 0x3)
192#define PRCM_REQ_MB4_HOTDOG_THRESHOLD (PRCM_REQ_MB4 + 0x0)
193#define PRCM_REQ_MB4_HOTMON_LOW (PRCM_REQ_MB4 + 0x0)
194#define PRCM_REQ_MB4_HOTMON_HIGH (PRCM_REQ_MB4 + 0x1)
195#define PRCM_REQ_MB4_HOTMON_CONFIG (PRCM_REQ_MB4 + 0x2)
196#define PRCM_REQ_MB4_HOT_PERIOD (PRCM_REQ_MB4 + 0x0)
197#define HOTMON_CONFIG_LOW BIT(0)
198#define HOTMON_CONFIG_HIGH BIT(1)
199
200/* Mailbox 5 Requests */
201#define PRCM_REQ_MB5_I2C_SLAVE_OP (PRCM_REQ_MB5 + 0x0)
202#define PRCM_REQ_MB5_I2C_HW_BITS (PRCM_REQ_MB5 + 0x1)
203#define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 0x2)
204#define PRCM_REQ_MB5_I2C_VAL (PRCM_REQ_MB5 + 0x3)
205#define PRCMU_I2C_WRITE(slave) \
206 (((slave) << 1) | (cpu_is_u8500v2() ? BIT(6) : 0))
207#define PRCMU_I2C_READ(slave) \
208 (((slave) << 1) | BIT(0) | (cpu_is_u8500v2() ? BIT(6) : 0))
209#define PRCMU_I2C_STOP_EN BIT(3)
210
211/* Mailbox 5 ACKs */
212#define PRCM_ACK_MB5_I2C_STATUS (PRCM_ACK_MB5 + 0x1)
213#define PRCM_ACK_MB5_I2C_VAL (PRCM_ACK_MB5 + 0x3)
214#define I2C_WR_OK 0x1
215#define I2C_RD_OK 0x2
216
217#define NUM_MB 8
218#define MBOX_BIT BIT
219#define ALL_MBOX_BITS (MBOX_BIT(NUM_MB) - 1)
220
221/*
222 * Wakeups/IRQs
223 */
224
225#define WAKEUP_BIT_RTC BIT(0)
226#define WAKEUP_BIT_RTT0 BIT(1)
227#define WAKEUP_BIT_RTT1 BIT(2)
228#define WAKEUP_BIT_HSI0 BIT(3)
229#define WAKEUP_BIT_HSI1 BIT(4)
230#define WAKEUP_BIT_CA_WAKE BIT(5)
231#define WAKEUP_BIT_USB BIT(6)
232#define WAKEUP_BIT_ABB BIT(7)
233#define WAKEUP_BIT_ABB_FIFO BIT(8)
234#define WAKEUP_BIT_SYSCLK_OK BIT(9)
235#define WAKEUP_BIT_CA_SLEEP BIT(10)
236#define WAKEUP_BIT_AC_WAKE_ACK BIT(11)
237#define WAKEUP_BIT_SIDE_TONE_OK BIT(12)
238#define WAKEUP_BIT_ANC_OK BIT(13)
239#define WAKEUP_BIT_SW_ERROR BIT(14)
240#define WAKEUP_BIT_AC_SLEEP_ACK BIT(15)
241#define WAKEUP_BIT_ARM BIT(17)
242#define WAKEUP_BIT_HOTMON_LOW BIT(18)
243#define WAKEUP_BIT_HOTMON_HIGH BIT(19)
244#define WAKEUP_BIT_MODEM_SW_RESET_REQ BIT(20)
245#define WAKEUP_BIT_GPIO0 BIT(23)
246#define WAKEUP_BIT_GPIO1 BIT(24)
247#define WAKEUP_BIT_GPIO2 BIT(25)
248#define WAKEUP_BIT_GPIO3 BIT(26)
249#define WAKEUP_BIT_GPIO4 BIT(27)
250#define WAKEUP_BIT_GPIO5 BIT(28)
251#define WAKEUP_BIT_GPIO6 BIT(29)
252#define WAKEUP_BIT_GPIO7 BIT(30)
253#define WAKEUP_BIT_GPIO8 BIT(31)
254
255/*
256 * This vector maps irq numbers to the bits in the bit field used in
257 * communication with the PRCMU firmware.
258 *
259 * The reason for having this is to keep the irq numbers contiguous even though
260 * the bits in the bit field are not. (The bits also have a tendency to move
261 * around, to further complicate matters.)
262 */
263#define IRQ_INDEX(_name) ((IRQ_PRCMU_##_name) - IRQ_PRCMU_BASE)
264#define IRQ_ENTRY(_name)[IRQ_INDEX(_name)] = (WAKEUP_BIT_##_name)
265static u32 prcmu_irq_bit[NUM_PRCMU_WAKEUPS] = {
266 IRQ_ENTRY(RTC),
267 IRQ_ENTRY(RTT0),
268 IRQ_ENTRY(RTT1),
269 IRQ_ENTRY(HSI0),
270 IRQ_ENTRY(HSI1),
271 IRQ_ENTRY(CA_WAKE),
272 IRQ_ENTRY(USB),
273 IRQ_ENTRY(ABB),
274 IRQ_ENTRY(ABB_FIFO),
275 IRQ_ENTRY(CA_SLEEP),
276 IRQ_ENTRY(ARM),
277 IRQ_ENTRY(HOTMON_LOW),
278 IRQ_ENTRY(HOTMON_HIGH),
279 IRQ_ENTRY(MODEM_SW_RESET_REQ),
280 IRQ_ENTRY(GPIO0),
281 IRQ_ENTRY(GPIO1),
282 IRQ_ENTRY(GPIO2),
283 IRQ_ENTRY(GPIO3),
284 IRQ_ENTRY(GPIO4),
285 IRQ_ENTRY(GPIO5),
286 IRQ_ENTRY(GPIO6),
287 IRQ_ENTRY(GPIO7),
288 IRQ_ENTRY(GPIO8)
289};
290
291#define VALID_WAKEUPS (BIT(NUM_PRCMU_WAKEUP_INDICES) - 1)
292#define WAKEUP_ENTRY(_name)[PRCMU_WAKEUP_INDEX_##_name] = (WAKEUP_BIT_##_name)
293static u32 prcmu_wakeup_bit[NUM_PRCMU_WAKEUP_INDICES] = {
294 WAKEUP_ENTRY(RTC),
295 WAKEUP_ENTRY(RTT0),
296 WAKEUP_ENTRY(RTT1),
297 WAKEUP_ENTRY(HSI0),
298 WAKEUP_ENTRY(HSI1),
299 WAKEUP_ENTRY(USB),
300 WAKEUP_ENTRY(ABB),
301 WAKEUP_ENTRY(ABB_FIFO),
302 WAKEUP_ENTRY(ARM)
303};
304
305/*
306 * mb0_transfer - state needed for mailbox 0 communication.
307 * @lock: The transaction lock.
308 * @dbb_events_lock: A lock used to handle concurrent access to (parts of)
309 * the request data.
310 * @mask_work: Work structure used for (un)masking wakeup interrupts.
311 * @req: Request data that need to persist between requests.
312 */
313static struct {
314 spinlock_t lock;
315 spinlock_t dbb_irqs_lock;
316 struct work_struct mask_work;
317 struct mutex ac_wake_lock;
318 struct completion ac_wake_work;
319 struct {
320 u32 dbb_irqs;
321 u32 dbb_wakeups;
322 u32 abb_events;
323 } req;
324} mb0_transfer;
325
326/*
327 * mb1_transfer - state needed for mailbox 1 communication.
328 * @lock: The transaction lock.
329 * @work: The transaction completion structure.
330 * @ack: Reply ("acknowledge") data.
331 */
332static struct {
333 struct mutex lock;
334 struct completion work;
335 struct {
336 u8 header;
337 u8 arm_opp;
338 u8 ape_opp;
339 u8 ape_voltage_status;
340 } ack;
341} mb1_transfer;
342
343/*
344 * mb2_transfer - state needed for mailbox 2 communication.
345 * @lock: The transaction lock.
346 * @work: The transaction completion structure.
347 * @auto_pm_lock: The autonomous power management configuration lock.
348 * @auto_pm_enabled: A flag indicating whether autonomous PM is enabled.
349 * @req: Request data that need to persist between requests.
350 * @ack: Reply ("acknowledge") data.
351 */
352static struct {
353 struct mutex lock;
354 struct completion work;
355 spinlock_t auto_pm_lock;
356 bool auto_pm_enabled;
357 struct {
358 u8 status;
359 } ack;
360} mb2_transfer;
361
362/*
363 * mb3_transfer - state needed for mailbox 3 communication.
364 * @lock: The request lock.
365 * @sysclk_lock: A lock used to handle concurrent sysclk requests.
366 * @sysclk_work: Work structure used for sysclk requests.
367 */
368static struct {
369 spinlock_t lock;
370 struct mutex sysclk_lock;
371 struct completion sysclk_work;
372} mb3_transfer;
373
374/*
375 * mb4_transfer - state needed for mailbox 4 communication.
376 * @lock: The transaction lock.
377 * @work: The transaction completion structure.
378 */
379static struct {
380 struct mutex lock;
381 struct completion work;
382} mb4_transfer;
383
384/*
385 * mb5_transfer - state needed for mailbox 5 communication.
386 * @lock: The transaction lock.
387 * @work: The transaction completion structure.
388 * @ack: Reply ("acknowledge") data.
389 */
390static struct {
391 struct mutex lock;
392 struct completion work;
393 struct {
394 u8 status;
395 u8 value;
396 } ack;
397} mb5_transfer;
398
399static atomic_t ac_wake_req_state = ATOMIC_INIT(0);
400
401/* Spinlocks */
402static DEFINE_SPINLOCK(clkout_lock);
403static DEFINE_SPINLOCK(gpiocr_lock);
404
405/* Global var to runtime determine TCDM base for v2 or v1 */
406static __iomem void *tcdm_base;
407
408struct clk_mgt {
409 unsigned int offset;
410 u32 pllsw;
411};
412
413static DEFINE_SPINLOCK(clk_mgt_lock);
414
/*
 * Map each PRCMU_* clock index to its PRCM_*_MGT register offset.
 * The pllsw field starts at 0 and is filled in at runtime by
 * request_reg_clock() when a clock is gated off.
 */
#define CLK_MGT_ENTRY(_name)[PRCMU_##_name] = { (PRCM_##_name##_MGT), 0 }
struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
	CLK_MGT_ENTRY(SGACLK),
	CLK_MGT_ENTRY(UARTCLK),
	CLK_MGT_ENTRY(MSP02CLK),
	CLK_MGT_ENTRY(MSP1CLK),
	CLK_MGT_ENTRY(I2CCLK),
	CLK_MGT_ENTRY(SDMMCCLK),
	CLK_MGT_ENTRY(SLIMCLK),
	CLK_MGT_ENTRY(PER1CLK),
	CLK_MGT_ENTRY(PER2CLK),
	CLK_MGT_ENTRY(PER3CLK),
	CLK_MGT_ENTRY(PER5CLK),
	CLK_MGT_ENTRY(PER6CLK),
	CLK_MGT_ENTRY(PER7CLK),
	CLK_MGT_ENTRY(LCDCLK),
	CLK_MGT_ENTRY(BMLCLK),
	CLK_MGT_ENTRY(HSITXCLK),
	CLK_MGT_ENTRY(HSIRXCLK),
	CLK_MGT_ENTRY(HDMICLK),
	CLK_MGT_ENTRY(APEATCLK),
	CLK_MGT_ENTRY(APETRACECLK),
	CLK_MGT_ENTRY(MCDECLK),
	CLK_MGT_ENTRY(IPI2CCLK),
	CLK_MGT_ENTRY(DSIALTCLK),
	CLK_MGT_ENTRY(DMACLK),
	CLK_MGT_ENTRY(B2R2CLK),
	CLK_MGT_ENTRY(TVCLK),
	CLK_MGT_ENTRY(SSPCLK),
	CLK_MGT_ENTRY(RNGCLK),
	CLK_MGT_ENTRY(UICCCLK),
};
447
/*
 * Used by MCDE to set up all necessary PRCMU registers
 */
#define PRCMU_RESET_DSIPLL		0x00004000
#define PRCMU_UNCLAMP_DSIPLL		0x00400800

/* Bit layout of the PRCM_*CLK_MGT clock setting registers. */
#define PRCMU_CLK_PLL_DIV_SHIFT		0
#define PRCMU_CLK_PLL_SW_SHIFT		5
#define PRCMU_CLK_38			(1 << 9)
#define PRCMU_CLK_38_SRC		(1 << 10)
#define PRCMU_CLK_38_DIV		(1 << 11)

/* PLLDIV=12, PLLSW=4 (PLLDDR) */
#define PRCMU_DSI_CLOCK_SETTING		0x0000008C

/* PLLDIV=8, PLLSW=4 (PLLDDR) */
#define PRCMU_DSI_CLOCK_SETTING_U8400	0x00000088

/* DPI 50000000 Hz */
#define PRCMU_DPI_CLOCK_SETTING		((1 << PRCMU_CLK_PLL_SW_SHIFT) | \
					  (16 << PRCMU_CLK_PLL_DIV_SHIFT))
#define PRCMU_DSI_LP_CLOCK_SETTING	0x00000E00

/* D=101, N=1, R=4, SELDIV2=0 */
#define PRCMU_PLLDSI_FREQ_SETTING	0x00040165

/* D=70, N=1, R=3, SELDIV2=0 */
#define PRCMU_PLLDSI_FREQ_SETTING_U8400	0x00030146

#define PRCMU_ENABLE_PLLDSI		0x00000001
#define PRCMU_DISABLE_PLLDSI		0x00000000
#define PRCMU_RELEASE_RESET_DSS		0x0000400C
#define PRCMU_DSI_PLLOUT_SEL_SETTING	0x00000202
/* ESC clk, div0=1, div1=1, div2=3 */
#define PRCMU_ENABLE_ESCAPE_CLOCK_DIV	0x07030101
#define PRCMU_DISABLE_ESCAPE_CLOCK_DIV	0x00030101
#define PRCMU_DSI_RESET_SW		0x00000007

/* Both lock bits of PRCM_PLLDSI_LOCKP must be set for a locked PLL. */
#define PRCMU_PLLDSI_LOCKP_LOCKED	0x3
487
/*
 * Firmware version information, as reported by the PRCMU firmware.
 * project_number identifies the chip variant (compared against
 * PRCMU_PROJECT_ID_8400V2_0 in prcmu_is_u8400()).
 */
static struct {
	u8 project_number;
	u8 api_version;
	u8 func_version;
	u8 errata;
} prcmu_version;
494
495
/**
 * prcmu_enable_dsipll - Enable and lock the DSI PLL.
 *
 * Runs the hardware bring-up sequence for the DSI PLL: release reset,
 * unclamp, program the frequency (chip-variant dependent), enable the
 * escape clocks, start the PLL and poll for lock (up to 10 x 100 us),
 * then de-assert the reset. Register write order is part of the HW
 * contract; do not reorder.
 *
 * Returns: 0 (the lock poll result is not propagated to the caller).
 */
int prcmu_enable_dsipll(void)
{
	int i;
	unsigned int plldsifreq;

	/* Clear DSIPLL_RESETN */
	writel(PRCMU_RESET_DSIPLL, (_PRCMU_BASE + PRCM_APE_RESETN_CLR));
	/* Unclamp DSIPLL in/out */
	writel(PRCMU_UNCLAMP_DSIPLL, (_PRCMU_BASE + PRCM_MMIP_LS_CLAMP_CLR));

	/* U8400 uses different divider settings (D=70, N=1, R=3). */
	if (prcmu_is_u8400())
		plldsifreq = PRCMU_PLLDSI_FREQ_SETTING_U8400;
	else
		plldsifreq = PRCMU_PLLDSI_FREQ_SETTING;
	/* Set DSI PLL FREQ */
	writel(plldsifreq, (_PRCMU_BASE + PRCM_PLLDSI_FREQ));
	writel(PRCMU_DSI_PLLOUT_SEL_SETTING,
		(_PRCMU_BASE + PRCM_DSI_PLLOUT_SEL));
	/* Enable Escape clocks */
	writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV,
		(_PRCMU_BASE + PRCM_DSITVCLK_DIV));

	/* Start DSI PLL */
	writel(PRCMU_ENABLE_PLLDSI, (_PRCMU_BASE + PRCM_PLLDSI_ENABLE));
	/* Reset DSI PLL */
	writel(PRCMU_DSI_RESET_SW, (_PRCMU_BASE + PRCM_DSI_SW_RESET));
	/* Poll for PLL lock; give up silently after ~1 ms. */
	for (i = 0; i < 10; i++) {
		if ((readl(_PRCMU_BASE + PRCM_PLLDSI_LOCKP) &
			PRCMU_PLLDSI_LOCKP_LOCKED)
					== PRCMU_PLLDSI_LOCKP_LOCKED)
			break;
		udelay(100);
	}
	/* Set DSIPLL_RESETN */
	writel(PRCMU_RESET_DSIPLL, (_PRCMU_BASE + PRCM_APE_RESETN_SET));
	return 0;
}
533
/**
 * prcmu_disable_dsipll - Disable the DSI PLL and its escape clocks.
 *
 * Returns: 0 (always succeeds).
 */
int prcmu_disable_dsipll(void)
{
	/* Disable dsi pll */
	writel(PRCMU_DISABLE_PLLDSI, (_PRCMU_BASE + PRCM_PLLDSI_ENABLE));
	/* Disable escapeclock */
	writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV,
		(_PRCMU_BASE + PRCM_DSITVCLK_DIV));
	return 0;
}
543
/**
 * prcmu_set_display_clocks - Program the display (HDMI/TV/LCD) clock
 * management registers for MCDE.
 *
 * The DSI clock setting differs between U8400 and other variants. The
 * register writes are done under the PRCMU HW semaphore (PRCM_SEM),
 * which is shared with the PRCMU firmware, and under clk_mgt_lock to
 * exclude other clock register updates from this driver.
 *
 * Returns: 0 (always succeeds).
 */
int prcmu_set_display_clocks(void)
{
	unsigned long flags;
	unsigned int dsiclk;

	if (prcmu_is_u8400())
		dsiclk = PRCMU_DSI_CLOCK_SETTING_U8400;
	else
		dsiclk = PRCMU_DSI_CLOCK_SETTING;

	spin_lock_irqsave(&clk_mgt_lock, flags);

	/* Grab the HW semaphore. */
	while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
		cpu_relax();

	writel(dsiclk, (_PRCMU_BASE + PRCM_HDMICLK_MGT));
	writel(PRCMU_DSI_LP_CLOCK_SETTING, (_PRCMU_BASE + PRCM_TVCLK_MGT));
	writel(PRCMU_DPI_CLOCK_SETTING, (_PRCMU_BASE + PRCM_LCDCLK_MGT));

	/* Release the HW semaphore. */
	writel(0, (_PRCMU_BASE + PRCM_SEM));

	spin_unlock_irqrestore(&clk_mgt_lock, flags);

	return 0;
}
571
572/**
573 * prcmu_enable_spi2 - Enables pin muxing for SPI2 on OtherAlternateC1.
574 */
575void prcmu_enable_spi2(void)
576{
577 u32 reg;
578 unsigned long flags;
579
580 spin_lock_irqsave(&gpiocr_lock, flags);
581 reg = readl(_PRCMU_BASE + PRCM_GPIOCR);
582 writel(reg | PRCM_GPIOCR_SPI2_SELECT, _PRCMU_BASE + PRCM_GPIOCR);
583 spin_unlock_irqrestore(&gpiocr_lock, flags);
584}
585
586/**
587 * prcmu_disable_spi2 - Disables pin muxing for SPI2 on OtherAlternateC1.
588 */
589void prcmu_disable_spi2(void)
590{
591 u32 reg;
592 unsigned long flags;
593
594 spin_lock_irqsave(&gpiocr_lock, flags);
595 reg = readl(_PRCMU_BASE + PRCM_GPIOCR);
596 writel(reg & ~PRCM_GPIOCR_SPI2_SELECT, _PRCMU_BASE + PRCM_GPIOCR);
597 spin_unlock_irqrestore(&gpiocr_lock, flags);
598}
599
/**
 * prcmu_has_arm_maxopp - Check whether the firmware supports the ARM MAX OPP.
 *
 * Returns: true if the AVS data in TCDM flags the VARM MAX OPP mode as
 * enabled, false otherwise.
 */
bool prcmu_has_arm_maxopp(void)
{
	return (readb(tcdm_base + PRCM_AVS_VARM_MAX_OPP) &
		PRCM_AVS_ISMODEENABLE_MASK) == PRCM_AVS_ISMODEENABLE_MASK;
}
605
/**
 * prcmu_is_u8400 - Check whether this is a U8400 chip.
 *
 * Returns: true if the firmware-reported project number matches
 * PRCMU_PROJECT_ID_8400V2_0.
 */
bool prcmu_is_u8400(void)
{
	return prcmu_version.project_number == PRCMU_PROJECT_ID_8400V2_0;
}
610
611/**
612 * prcmu_get_boot_status - PRCMU boot status checking
613 * Returns: the current PRCMU boot status
614 */
615int prcmu_get_boot_status(void)
616{
617 return readb(tcdm_base + PRCM_BOOT_STATUS);
618}
619
620/**
621 * prcmu_set_rc_a2p - This function is used to run few power state sequences
622 * @val: Value to be set, i.e. transition requested
623 * Returns: 0 on success, -EINVAL on invalid argument
624 *
625 * This function is used to run the following power state sequences -
626 * any state to ApReset, ApDeepSleep to ApExecute, ApExecute to ApDeepSleep
627 */
628int prcmu_set_rc_a2p(enum romcode_write val)
629{
630 if (val < RDY_2_DS || val > RDY_2_XP70_RST)
631 return -EINVAL;
632 writeb(val, (tcdm_base + PRCM_ROMCODE_A2P));
633 return 0;
634}
635
636/**
637 * prcmu_get_rc_p2a - This function is used to get power state sequences
638 * Returns: the power transition that has last happened
639 *
640 * This function can return the following transitions-
641 * any state to ApReset, ApDeepSleep to ApExecute, ApExecute to ApDeepSleep
642 */
643enum romcode_read prcmu_get_rc_p2a(void)
644{
645 return readb(tcdm_base + PRCM_ROMCODE_P2A);
646}
647
648/**
649 * prcmu_get_current_mode - Return the current XP70 power mode
650 * Returns: Returns the current AP(ARM) power mode: init,
651 * apBoot, apExecute, apDeepSleep, apSleep, apIdle, apReset
652 */
653enum ap_pwrst prcmu_get_xp70_current_state(void)
654{
655 return readb(tcdm_base + PRCM_XP70_CUR_PWR_STATE);
656}
657
658/**
659 * prcmu_config_clkout - Configure one of the programmable clock outputs.
660 * @clkout: The CLKOUT number (0 or 1).
661 * @source: The clock to be used (one of the PRCMU_CLKSRC_*).
662 * @div: The divider to be applied.
663 *
664 * Configures one of the programmable clock outputs (CLKOUTs).
665 * @div should be in the range [1,63] to request a configuration, or 0 to
666 * inform that the configuration is no longer requested.
667 */
668int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
669{
670 static int requests[2];
671 int r = 0;
672 unsigned long flags;
673 u32 val;
674 u32 bits;
675 u32 mask;
676 u32 div_mask;
677
678 BUG_ON(clkout > 1);
679 BUG_ON(div > 63);
680 BUG_ON((clkout == 0) && (source > PRCMU_CLKSRC_CLK009));
681
682 if (!div && !requests[clkout])
683 return -EINVAL;
684
685 switch (clkout) {
686 case 0:
687 div_mask = PRCM_CLKOCR_CLKODIV0_MASK;
688 mask = (PRCM_CLKOCR_CLKODIV0_MASK | PRCM_CLKOCR_CLKOSEL0_MASK);
689 bits = ((source << PRCM_CLKOCR_CLKOSEL0_SHIFT) |
690 (div << PRCM_CLKOCR_CLKODIV0_SHIFT));
691 break;
692 case 1:
693 div_mask = PRCM_CLKOCR_CLKODIV1_MASK;
694 mask = (PRCM_CLKOCR_CLKODIV1_MASK | PRCM_CLKOCR_CLKOSEL1_MASK |
695 PRCM_CLKOCR_CLK1TYPE);
696 bits = ((source << PRCM_CLKOCR_CLKOSEL1_SHIFT) |
697 (div << PRCM_CLKOCR_CLKODIV1_SHIFT));
698 break;
699 }
700 bits &= mask;
701
702 spin_lock_irqsave(&clkout_lock, flags);
703
704 val = readl(_PRCMU_BASE + PRCM_CLKOCR);
705 if (val & div_mask) {
706 if (div) {
707 if ((val & mask) != bits) {
708 r = -EBUSY;
709 goto unlock_and_return;
710 }
711 } else {
712 if ((val & mask & ~div_mask) != bits) {
713 r = -EINVAL;
714 goto unlock_and_return;
715 }
716 }
717 }
718 writel((bits | (val & ~mask)), (_PRCMU_BASE + PRCM_CLKOCR));
719 requests[clkout] += (div ? 1 : -1);
720
721unlock_and_return:
722 spin_unlock_irqrestore(&clkout_lock, flags);
723
724 return r;
725}
726
/**
 * prcmu_set_power_state - Request an AP power state transition.
 * @state: The requested power state (PRCMU_AP_SLEEP..PRCMU_AP_DEEP_IDLE).
 * @keep_ulp_clk: Whether the ULP clock should stay on in the new state.
 * @keep_ap_pll: Whether the AP PLL should stay on in the new state.
 *
 * Sends a MB0H_POWER_STATE_TRANS request through mailbox 0. The call
 * does not wait for a firmware reply.
 *
 * Returns: 0 (always succeeds once the mailbox is free).
 */
int prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll)
{
	unsigned long flags;

	BUG_ON((state < PRCMU_AP_SLEEP) || (PRCMU_AP_DEEP_IDLE < state));

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	/* Busy-wait until the firmware has consumed any pending MB0 request. */
	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
		cpu_relax();

	writeb(MB0H_POWER_STATE_TRANS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
	writeb(state, (tcdm_base + PRCM_REQ_MB0_AP_POWER_STATE));
	writeb((keep_ap_pll ? 1 : 0), (tcdm_base + PRCM_REQ_MB0_AP_PLL_STATE));
	writeb((keep_ulp_clk ? 1 : 0),
		(tcdm_base + PRCM_REQ_MB0_ULP_CLOCK_STATE));
	writeb(0, (tcdm_base + PRCM_REQ_MB0_DO_NOT_WFI));
	/* Ring the doorbell for mailbox 0. */
	writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));

	spin_unlock_irqrestore(&mb0_transfer.lock, flags);

	return 0;
}
750
/*
 * config_wakeups - Push the combined DBB/ABB wakeup configuration to the
 * firmware, once for the execute state and once for the sleep state.
 *
 * The AC_WAKE/AC_SLEEP acknowledge bits are always enabled since the
 * modem-access handshake depends on them. Redundant updates are skipped
 * by comparing against the last values sent.
 *
 * This function should only be called while mb0_transfer.lock is held.
 */
static void config_wakeups(void)
{
	const u8 header[2] = {
		MB0H_CONFIG_WAKEUPS_EXE,
		MB0H_CONFIG_WAKEUPS_SLEEP
	};
	/* Last configuration sent to the firmware; persists across calls. */
	static u32 last_dbb_events;
	static u32 last_abb_events;
	u32 dbb_events;
	u32 abb_events;
	unsigned int i;

	dbb_events = mb0_transfer.req.dbb_irqs | mb0_transfer.req.dbb_wakeups;
	/* Always listen for the modem access handshake acknowledgements. */
	dbb_events |= (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK);

	abb_events = mb0_transfer.req.abb_events;

	if ((dbb_events == last_dbb_events) && (abb_events == last_abb_events))
		return;

	/* Send the same event masks under both headers (execute and sleep). */
	for (i = 0; i < 2; i++) {
		while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
			cpu_relax();
		writel(dbb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_8500));
		writel(abb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_4500));
		writeb(header[i], (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
		writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
	}
	last_dbb_events = dbb_events;
	last_abb_events = abb_events;
}
783
784void prcmu_enable_wakeups(u32 wakeups)
785{
786 unsigned long flags;
787 u32 bits;
788 int i;
789
790 BUG_ON(wakeups != (wakeups & VALID_WAKEUPS));
791
792 for (i = 0, bits = 0; i < NUM_PRCMU_WAKEUP_INDICES; i++) {
793 if (wakeups & BIT(i))
794 bits |= prcmu_wakeup_bit[i];
795 }
796
797 spin_lock_irqsave(&mb0_transfer.lock, flags);
798
799 mb0_transfer.req.dbb_wakeups = bits;
800 config_wakeups();
801
802 spin_unlock_irqrestore(&mb0_transfer.lock, flags);
803}
804
/**
 * prcmu_config_abb_event_readout - Select which ABB events are reported.
 * @abb_events: Bitmask of ABB (analog baseband) events to enable.
 *
 * Stores the mask and pushes the new wakeup configuration to the
 * firmware via config_wakeups().
 */
void prcmu_config_abb_event_readout(u32 abb_events)
{
	unsigned long flags;

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	mb0_transfer.req.abb_events = abb_events;
	config_wakeups();

	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}
816
817void prcmu_get_abb_event_buffer(void __iomem **buf)
818{
819 if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1)
820 *buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_1_4500);
821 else
822 *buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_0_4500);
823}
824
825/**
826 * prcmu_set_arm_opp - set the appropriate ARM OPP
827 * @opp: The new ARM operating point to which transition is to be made
828 * Returns: 0 on success, non-zero on failure
829 *
830 * This function sets the the operating point of the ARM.
831 */
832int prcmu_set_arm_opp(u8 opp)
833{
834 int r;
835
836 if (opp < ARM_NO_CHANGE || opp > ARM_EXTCLK)
837 return -EINVAL;
838
839 r = 0;
840
841 mutex_lock(&mb1_transfer.lock);
842
843 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
844 cpu_relax();
845
846 writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
847 writeb(opp, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
848 writeb(APE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_APE_OPP));
849
850 writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
851 wait_for_completion(&mb1_transfer.work);
852
853 if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
854 (mb1_transfer.ack.arm_opp != opp))
855 r = -EIO;
856
857 mutex_unlock(&mb1_transfer.lock);
858
859 return r;
860}
861
/**
 * prcmu_get_arm_opp - get the current ARM OPP
 *
 * Returns: the current ARM OPP (byte read from the MB1 ack area in TCDM)
 */
int prcmu_get_arm_opp(void)
{
	return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_ARM_OPP);
}
871
/**
 * prcmu_get_ddr_opp - get the current DDR OPP
 *
 * Returns: the current DDR OPP (read directly from the PRCMU register,
 * not from TCDM)
 */
int prcmu_get_ddr_opp(void)
{
	return readb(_PRCMU_BASE + PRCM_DDR_SUBSYS_APE_MINBW);
}
881
882/**
883 * set_ddr_opp - set the appropriate DDR OPP
884 * @opp: The new DDR operating point to which transition is to be made
885 * Returns: 0 on success, non-zero on failure
886 *
887 * This function sets the operating point of the DDR.
888 */
889int prcmu_set_ddr_opp(u8 opp)
890{
891 if (opp < DDR_100_OPP || opp > DDR_25_OPP)
892 return -EINVAL;
893 /* Changing the DDR OPP can hang the hardware pre-v21 */
894 if (cpu_is_u8500v20_or_later() && !cpu_is_u8500v20())
895 writeb(opp, (_PRCMU_BASE + PRCM_DDR_SUBSYS_APE_MINBW));
896
897 return 0;
898}
/**
 * prcmu_set_ape_opp - set the appropriate APE OPP
 * @opp: The new APE operating point to which transition is to be made
 * Returns: 0 on success, -EIO if the firmware did not acknowledge the
 * requested OPP
 *
 * This function sets the operating point of the APE. It sends a
 * MB1H_ARM_APE_OPP request (leaving the ARM OPP unchanged) and blocks
 * until the firmware replies.
 */
int prcmu_set_ape_opp(u8 opp)
{
	int r = 0;

	mutex_lock(&mb1_transfer.lock);

	/* Busy-wait until the firmware has consumed any pending MB1 request. */
	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
	writeb(ARM_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
	writeb(opp, (tcdm_base + PRCM_REQ_MB1_APE_OPP));

	writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
	wait_for_completion(&mb1_transfer.work);

	/* The ack must echo both the header and the requested OPP. */
	if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
		(mb1_transfer.ack.ape_opp != opp))
		r = -EIO;

	mutex_unlock(&mb1_transfer.lock);

	return r;
}
930
/**
 * prcmu_get_ape_opp - get the current APE OPP
 *
 * Returns: the current APE OPP (byte read from the MB1 ack area in TCDM)
 */
int prcmu_get_ape_opp(void)
{
	return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_APE_OPP);
}
940
941/**
942 * prcmu_request_ape_opp_100_voltage - Request APE OPP 100% voltage
943 * @enable: true to request the higher voltage, false to drop a request.
944 *
945 * Calls to this function to enable and disable requests must be balanced.
946 */
947int prcmu_request_ape_opp_100_voltage(bool enable)
948{
949 int r = 0;
950 u8 header;
951 static unsigned int requests;
952
953 mutex_lock(&mb1_transfer.lock);
954
955 if (enable) {
956 if (0 != requests++)
957 goto unlock_and_return;
958 header = MB1H_REQUEST_APE_OPP_100_VOLT;
959 } else {
960 if (requests == 0) {
961 r = -EIO;
962 goto unlock_and_return;
963 } else if (1 != requests--) {
964 goto unlock_and_return;
965 }
966 header = MB1H_RELEASE_APE_OPP_100_VOLT;
967 }
968
969 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
970 cpu_relax();
971
972 writeb(header, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
973
974 writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
975 wait_for_completion(&mb1_transfer.work);
976
977 if ((mb1_transfer.ack.header != header) ||
978 ((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
979 r = -EIO;
980
981unlock_and_return:
982 mutex_unlock(&mb1_transfer.lock);
983
984 return r;
985}
986
987/**
988 * prcmu_release_usb_wakeup_state - release the state required by a USB wakeup
989 *
990 * This function releases the power state requirements of a USB wakeup.
991 */
992int prcmu_release_usb_wakeup_state(void)
993{
994 int r = 0;
995
996 mutex_lock(&mb1_transfer.lock);
997
998 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
999 cpu_relax();
1000
1001 writeb(MB1H_RELEASE_USB_WAKEUP,
1002 (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
1003
1004 writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1005 wait_for_completion(&mb1_transfer.work);
1006
1007 if ((mb1_transfer.ack.header != MB1H_RELEASE_USB_WAKEUP) ||
1008 ((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
1009 r = -EIO;
1010
1011 mutex_unlock(&mb1_transfer.lock);
1012
1013 return r;
1014}
1015
1016/**
1017 * prcmu_set_epod - set the state of a EPOD (power domain)
1018 * @epod_id: The EPOD to set
1019 * @epod_state: The new EPOD state
1020 *
1021 * This function sets the state of a EPOD (power domain). It may not be called
1022 * from interrupt context.
1023 */
1024int prcmu_set_epod(u16 epod_id, u8 epod_state)
1025{
1026 int r = 0;
1027 bool ram_retention = false;
1028 int i;
1029
1030 /* check argument */
1031 BUG_ON(epod_id >= NUM_EPOD_ID);
1032
1033 /* set flag if retention is possible */
1034 switch (epod_id) {
1035 case EPOD_ID_SVAMMDSP:
1036 case EPOD_ID_SIAMMDSP:
1037 case EPOD_ID_ESRAM12:
1038 case EPOD_ID_ESRAM34:
1039 ram_retention = true;
1040 break;
1041 }
1042
1043 /* check argument */
1044 BUG_ON(epod_state > EPOD_STATE_ON);
1045 BUG_ON(epod_state == EPOD_STATE_RAMRET && !ram_retention);
1046
1047 /* get lock */
1048 mutex_lock(&mb2_transfer.lock);
1049
1050 /* wait for mailbox */
1051 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
1052 cpu_relax();
1053
1054 /* fill in mailbox */
1055 for (i = 0; i < NUM_EPOD_ID; i++)
1056 writeb(EPOD_STATE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB2 + i));
1057 writeb(epod_state, (tcdm_base + PRCM_REQ_MB2 + epod_id));
1058
1059 writeb(MB2H_DPS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB2));
1060
1061 writel(MBOX_BIT(2), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1062
1063 /*
1064 * The current firmware version does not handle errors correctly,
1065 * and we cannot recover if there is an error.
1066 * This is expected to change when the firmware is updated.
1067 */
1068 if (!wait_for_completion_timeout(&mb2_transfer.work,
1069 msecs_to_jiffies(20000))) {
1070 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1071 __func__);
1072 r = -EIO;
1073 goto unlock_and_return;
1074 }
1075
1076 if (mb2_transfer.ack.status != HWACC_PWR_ST_OK)
1077 r = -EIO;
1078
1079unlock_and_return:
1080 mutex_unlock(&mb2_transfer.lock);
1081 return r;
1082}
1083
1084/**
1085 * prcmu_configure_auto_pm - Configure autonomous power management.
1086 * @sleep: Configuration for ApSleep.
1087 * @idle: Configuration for ApIdle.
1088 */
1089void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep,
1090 struct prcmu_auto_pm_config *idle)
1091{
1092 u32 sleep_cfg;
1093 u32 idle_cfg;
1094 unsigned long flags;
1095
1096 BUG_ON((sleep == NULL) || (idle == NULL));
1097
1098 sleep_cfg = (sleep->sva_auto_pm_enable & 0xF);
1099 sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_auto_pm_enable & 0xF));
1100 sleep_cfg = ((sleep_cfg << 8) | (sleep->sva_power_on & 0xFF));
1101 sleep_cfg = ((sleep_cfg << 8) | (sleep->sia_power_on & 0xFF));
1102 sleep_cfg = ((sleep_cfg << 4) | (sleep->sva_policy & 0xF));
1103 sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_policy & 0xF));
1104
1105 idle_cfg = (idle->sva_auto_pm_enable & 0xF);
1106 idle_cfg = ((idle_cfg << 4) | (idle->sia_auto_pm_enable & 0xF));
1107 idle_cfg = ((idle_cfg << 8) | (idle->sva_power_on & 0xFF));
1108 idle_cfg = ((idle_cfg << 8) | (idle->sia_power_on & 0xFF));
1109 idle_cfg = ((idle_cfg << 4) | (idle->sva_policy & 0xF));
1110 idle_cfg = ((idle_cfg << 4) | (idle->sia_policy & 0xF));
1111
1112 spin_lock_irqsave(&mb2_transfer.auto_pm_lock, flags);
1113
1114 /*
1115 * The autonomous power management configuration is done through
1116 * fields in mailbox 2, but these fields are only used as shared
1117 * variables - i.e. there is no need to send a message.
1118 */
1119 writel(sleep_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_SLEEP));
1120 writel(idle_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_IDLE));
1121
1122 mb2_transfer.auto_pm_enabled =
1123 ((sleep->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
1124 (sleep->sia_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
1125 (idle->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
1126 (idle->sia_auto_pm_enable == PRCMU_AUTO_PM_ON));
1127
1128 spin_unlock_irqrestore(&mb2_transfer.auto_pm_lock, flags);
1129}
1130EXPORT_SYMBOL(prcmu_configure_auto_pm);
1131
/**
 * prcmu_is_auto_pm_enabled - Report whether any autonomous PM is enabled.
 *
 * Returns: the flag cached by prcmu_configure_auto_pm() (read without
 * taking auto_pm_lock).
 */
bool prcmu_is_auto_pm_enabled(void)
{
	return mb2_transfer.auto_pm_enabled;
}
1136
/*
 * request_sysclk - Ask the firmware to switch SysClk on or off.
 * @enable: true to enable, false to disable.
 *
 * Sends a MB3H_SYSCLK request. The firmware only acknowledges enable
 * requests (via the sysclk_work completion, signalled from
 * read_mailbox_0 on WAKEUP_BIT_SYSCLK_OK), so only enables can time out.
 *
 * Returns: 0 on success, -EIO if an enable request timed out.
 */
static int request_sysclk(bool enable)
{
	int r;
	unsigned long flags;

	r = 0;

	/* Serialize whole transactions, including waiting for the ack. */
	mutex_lock(&mb3_transfer.sysclk_lock);

	spin_lock_irqsave(&mb3_transfer.lock, flags);

	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(3))
		cpu_relax();

	writeb((enable ? ON : OFF), (tcdm_base + PRCM_REQ_MB3_SYSCLK_MGT));

	writeb(MB3H_SYSCLK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB3));
	writel(MBOX_BIT(3), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));

	spin_unlock_irqrestore(&mb3_transfer.lock, flags);

	/*
	 * The firmware only sends an ACK if we want to enable the
	 * SysClk, and it succeeds.
	 */
	if (enable && !wait_for_completion_timeout(&mb3_transfer.sysclk_work,
			msecs_to_jiffies(20000))) {
		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
			__func__);
		r = -EIO;
	}

	mutex_unlock(&mb3_transfer.sysclk_lock);

	return r;
}
1173
1174static int request_timclk(bool enable)
1175{
1176 u32 val = (PRCM_TCR_DOZE_MODE | PRCM_TCR_TENSEL_MASK);
1177
1178 if (!enable)
1179 val |= PRCM_TCR_STOP_TIMERS;
1180 writel(val, (_PRCMU_BASE + PRCM_TCR));
1181
1182 return 0;
1183}
1184
/*
 * request_reg_clock - Gate a PRCM_*_MGT register clock on or off.
 * @clock: Index into clk_mgt[] (one of the PRCMU_* register clocks).
 * @enable: true to enable, false to disable.
 *
 * On disable, the clock's PLL source bits are saved in clk_mgt[] so a
 * later enable restores the same source. The register access is done
 * under the PRCMU HW semaphore (shared with the firmware) and under
 * clk_mgt_lock.
 *
 * Returns: 0 (always succeeds).
 */
static int request_reg_clock(u8 clock, bool enable)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&clk_mgt_lock, flags);

	/* Grab the HW semaphore. */
	while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
		cpu_relax();

	val = readl(_PRCMU_BASE + clk_mgt[clock].offset);
	if (enable) {
		val |= (PRCM_CLK_MGT_CLKEN | clk_mgt[clock].pllsw);
	} else {
		/* Remember the PLL source so it can be restored on enable. */
		clk_mgt[clock].pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);
		val &= ~(PRCM_CLK_MGT_CLKEN | PRCM_CLK_MGT_CLKPLLSW_MASK);
	}
	writel(val, (_PRCMU_BASE + clk_mgt[clock].offset));

	/* Release the HW semaphore. */
	writel(0, (_PRCMU_BASE + PRCM_SEM));

	spin_unlock_irqrestore(&clk_mgt_lock, flags);

	return 0;
}
1212
1213/**
1214 * prcmu_request_clock() - Request for a clock to be enabled or disabled.
1215 * @clock: The clock for which the request is made.
1216 * @enable: Whether the clock should be enabled (true) or disabled (false).
1217 *
1218 * This function should only be used by the clock implementation.
1219 * Do not use it from any other place!
1220 */
1221int prcmu_request_clock(u8 clock, bool enable)
1222{
1223 if (clock < PRCMU_NUM_REG_CLOCKS)
1224 return request_reg_clock(clock, enable);
1225 else if (clock == PRCMU_TIMCLK)
1226 return request_timclk(enable);
1227 else if (clock == PRCMU_SYSCLK)
1228 return request_sysclk(enable);
1229 else
1230 return -EINVAL;
1231}
1232
/**
 * prcmu_config_esram0_deep_sleep - Configure the ESRAM0 deep sleep state.
 * @state: One of the ESRAM0_DEEP_SLEEP_STATE_* values.
 *
 * Sends a MB4H_MEM_ST request that also fixes the DDR power states for
 * the sleep/idle and deep-idle cases, and blocks for the reply.
 *
 * Returns: 0 on success, -EINVAL on invalid argument.
 */
int prcmu_config_esram0_deep_sleep(u8 state)
{
	if ((state > ESRAM0_DEEP_SLEEP_STATE_RET) ||
	    (state < ESRAM0_DEEP_SLEEP_STATE_OFF))
		return -EINVAL;

	mutex_lock(&mb4_transfer.lock);

	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writeb(MB4H_MEM_ST, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
	writeb(((DDR_PWR_STATE_OFFHIGHLAT << 4) | DDR_PWR_STATE_ON),
	       (tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE));
	writeb(DDR_PWR_STATE_ON,
	       (tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE));
	writeb(state, (tcdm_base + PRCM_REQ_MB4_ESRAM0_ST));

	writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}
1258
/**
 * prcmu_config_hotdog - Configure the thermal "hotdog" threshold.
 * @threshold: The temperature threshold value to write.
 *
 * Sends a MB4H_HOTDOG request and blocks for the reply.
 *
 * Returns: 0 (always succeeds).
 */
int prcmu_config_hotdog(u8 threshold)
{
	mutex_lock(&mb4_transfer.lock);

	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writeb(threshold, (tcdm_base + PRCM_REQ_MB4_HOTDOG_THRESHOLD));
	writeb(MB4H_HOTDOG, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}
1276
/**
 * prcmu_config_hotmon - Configure the thermal monitor thresholds.
 * @low: The low temperature threshold.
 * @high: The high temperature threshold.
 *
 * Sends a MB4H_HOTMON request with both thresholds enabled and blocks
 * for the reply.
 *
 * Returns: 0 (always succeeds).
 */
int prcmu_config_hotmon(u8 low, u8 high)
{
	mutex_lock(&mb4_transfer.lock);

	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writeb(low, (tcdm_base + PRCM_REQ_MB4_HOTMON_LOW));
	writeb(high, (tcdm_base + PRCM_REQ_MB4_HOTMON_HIGH));
	writeb((HOTMON_CONFIG_LOW | HOTMON_CONFIG_HIGH),
	       (tcdm_base + PRCM_REQ_MB4_HOTMON_CONFIG));
	writeb(MB4H_HOTMON, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}
1297
/*
 * config_hot_period - Set the temperature measurement period.
 * @val: Period in 32 kHz cycles, or 0xFFFF to stop measuring
 *       (see prcmu_start_temp_sense()/prcmu_stop_temp_sense()).
 *
 * Sends a MB4H_HOT_PERIOD request and blocks for the reply.
 *
 * Returns: 0 (always succeeds).
 */
static int config_hot_period(u16 val)
{
	mutex_lock(&mb4_transfer.lock);

	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writew(val, (tcdm_base + PRCM_REQ_MB4_HOT_PERIOD));
	writeb(MB4H_HOT_PERIOD, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}
1315
/**
 * prcmu_start_temp_sense - Start periodic temperature measurement.
 * @cycles32k: Measurement period in 32 kHz cycles; 0xFFFF is reserved
 *             as the "stop" sentinel and is therefore rejected.
 *
 * Returns: 0 on success, -EINVAL if @cycles32k is the reserved value.
 */
int prcmu_start_temp_sense(u16 cycles32k)
{
	if (cycles32k == 0xFFFF)
		return -EINVAL;

	return config_hot_period(cycles32k);
}
1323
/**
 * prcmu_stop_temp_sense - Stop periodic temperature measurement.
 *
 * Returns: 0 (sends the 0xFFFF "stop" sentinel period).
 */
int prcmu_stop_temp_sense(void)
{
	return config_hot_period(0xFFFF);
}
1328
1329/**
1330 * prcmu_set_clock_divider() - Configure the clock divider.
1331 * @clock: The clock for which the request is made.
1332 * @divider: The clock divider. (< 32)
1333 *
1334 * This function should only be used by the clock implementation.
1335 * Do not use it from any other place!
1336 */
1337int prcmu_set_clock_divider(u8 clock, u8 divider)
1338{
1339 u32 val;
1340 unsigned long flags;
1341
1342 if ((clock >= PRCMU_NUM_REG_CLOCKS) || (divider < 1) || (31 < divider))
1343 return -EINVAL;
1344
1345 spin_lock_irqsave(&clk_mgt_lock, flags);
1346
1347 /* Grab the HW semaphore. */
1348 while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
1349 cpu_relax();
1350
1351 val = readl(_PRCMU_BASE + clk_mgt[clock].offset);
1352 val &= ~(PRCM_CLK_MGT_CLKPLLDIV_MASK);
1353 val |= (u32)divider;
1354 writel(val, (_PRCMU_BASE + clk_mgt[clock].offset));
1355
1356 /* Release the HW semaphore. */
1357 writel(0, (_PRCMU_BASE + PRCM_SEM));
1358
1359 spin_unlock_irqrestore(&clk_mgt_lock, flags);
1360
1361 return 0;
1362}
1363
1364/**
1365 * prcmu_abb_read() - Read register value(s) from the ABB.
1366 * @slave: The I2C slave address.
1367 * @reg: The (start) register address.
1368 * @value: The read out value(s).
1369 * @size: The number of registers to read.
1370 *
1371 * Reads register value(s) from the ABB.
1372 * @size has to be 1 for the current firmware version.
1373 */
1374int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
1375{
1376 int r;
1377
1378 if (size != 1)
1379 return -EINVAL;
1380
1381 mutex_lock(&mb5_transfer.lock);
1382
1383 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
1384 cpu_relax();
1385
1386 writeb(PRCMU_I2C_READ(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
1387 writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS));
1388 writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
1389 writeb(0, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));
1390
1391 writel(MBOX_BIT(5), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1392
1393 if (!wait_for_completion_timeout(&mb5_transfer.work,
1394 msecs_to_jiffies(20000))) {
1395 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1396 __func__);
1397 r = -EIO;
1398 } else {
1399 r = ((mb5_transfer.ack.status == I2C_RD_OK) ? 0 : -EIO);
1400 }
1401
1402 if (!r)
1403 *value = mb5_transfer.ack.value;
1404
1405 mutex_unlock(&mb5_transfer.lock);
1406
1407 return r;
1408}
1409
1410/**
1411 * prcmu_abb_write() - Write register value(s) to the ABB.
1412 * @slave: The I2C slave address.
1413 * @reg: The (start) register address.
1414 * @value: The value(s) to write.
1415 * @size: The number of registers to write.
1416 *
1417 * Reads register value(s) from the ABB.
1418 * @size has to be 1 for the current firmware version.
1419 */
1420int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
1421{
1422 int r;
1423
1424 if (size != 1)
1425 return -EINVAL;
1426
1427 mutex_lock(&mb5_transfer.lock);
1428
1429 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
1430 cpu_relax();
1431
1432 writeb(PRCMU_I2C_WRITE(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
1433 writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS));
1434 writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
1435 writeb(*value, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));
1436
1437 writel(MBOX_BIT(5), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1438
1439 if (!wait_for_completion_timeout(&mb5_transfer.work,
1440 msecs_to_jiffies(20000))) {
1441 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1442 __func__);
1443 r = -EIO;
1444 } else {
1445 r = ((mb5_transfer.ack.status == I2C_WR_OK) ? 0 : -EIO);
1446 }
1447
1448 mutex_unlock(&mb5_transfer.lock);
1449
1450 return r;
1451}
1452
1453/**
1454 * prcmu_ac_wake_req - should be called whenever ARM wants to wakeup Modem
1455 */
1456void prcmu_ac_wake_req(void)
1457{
1458 u32 val;
1459
1460 mutex_lock(&mb0_transfer.ac_wake_lock);
1461
1462 val = readl(_PRCMU_BASE + PRCM_HOSTACCESS_REQ);
1463 if (val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ)
1464 goto unlock_and_return;
1465
1466 atomic_set(&ac_wake_req_state, 1);
1467
1468 writel((val | PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ),
1469 (_PRCMU_BASE + PRCM_HOSTACCESS_REQ));
1470
1471 if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
1472 msecs_to_jiffies(20000))) {
1473 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1474 __func__);
1475 }
1476
1477unlock_and_return:
1478 mutex_unlock(&mb0_transfer.ac_wake_lock);
1479}
1480
1481/**
1482 * prcmu_ac_sleep_req - called when ARM no longer needs to talk to modem
1483 */
1484void prcmu_ac_sleep_req()
1485{
1486 u32 val;
1487
1488 mutex_lock(&mb0_transfer.ac_wake_lock);
1489
1490 val = readl(_PRCMU_BASE + PRCM_HOSTACCESS_REQ);
1491 if (!(val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ))
1492 goto unlock_and_return;
1493
1494 writel((val & ~PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ),
1495 (_PRCMU_BASE + PRCM_HOSTACCESS_REQ));
1496
1497 if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
1498 msecs_to_jiffies(20000))) {
1499 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1500 __func__);
1501 }
1502
1503 atomic_set(&ac_wake_req_state, 0);
1504
1505unlock_and_return:
1506 mutex_unlock(&mb0_transfer.ac_wake_lock);
1507}
1508
1509bool prcmu_is_ac_wake_requested(void)
1510{
1511 return (atomic_read(&ac_wake_req_state) != 0);
1512}
1513
1514/**
1515 * prcmu_system_reset - System reset
1516 *
1517 * Saves the reset reason code and then sets the APE_SOFRST register which
1518 * fires interrupt to fw
1519 */
1520void prcmu_system_reset(u16 reset_code)
1521{
1522 writew(reset_code, (tcdm_base + PRCM_SW_RST_REASON));
1523 writel(1, (_PRCMU_BASE + PRCM_APE_SOFTRST));
1524}
1525
1526/**
1527 * prcmu_reset_modem - ask the PRCMU to reset modem
1528 */
1529void prcmu_modem_reset(void)
1530{
1531 mutex_lock(&mb1_transfer.lock);
1532
1533 while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
1534 cpu_relax();
1535
1536 writeb(MB1H_RESET_MODEM, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
1537 writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));
1538 wait_for_completion(&mb1_transfer.work);
1539
1540 /*
1541 * No need to check return from PRCMU as modem should go in reset state
1542 * This state is already managed by upper layer
1543 */
1544
1545 mutex_unlock(&mb1_transfer.lock);
1546}
1547
/* Acknowledge a DBB wakeup by sending MB0H_READ_WAKEUP_ACK on mailbox 0. */
static void ack_dbb_wakeup(void)
{
	unsigned long flags;

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	/* Busy-wait until mailbox 0 is free before writing the ack. */
	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
		cpu_relax();

	writeb(MB0H_READ_WAKEUP_ACK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
	writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));

	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}
1562
/* Log a warning for an unrecognized message header in mailbox @n. */
static inline void print_unknown_header_warning(u8 n, u8 header)
{
	pr_warning("prcmu: Unknown message header (%d) in mailbox %d.\n",
		header, n);
}
1568
/*
 * Handle an acknowledge on mailbox 0 (wakeup events).
 *
 * Returns true when threaded handling is required, i.e. the wakeup must
 * still be acknowledged via ack_dbb_wakeup() from the irq thread.
 */
static bool read_mailbox_0(void)
{
	bool r;
	u32 ev;
	unsigned int n;
	u8 header;

	header = readb(tcdm_base + PRCM_MBOX_HEADER_ACK_MB0);
	switch (header) {
	case MB0H_WAKEUP_EXE:
	case MB0H_WAKEUP_SLEEP:
		/* The read pointer selects which of the two event words is valid. */
		if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1)
			ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_1_8500);
		else
			ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_0_8500);

		/* Wake up waiters for AC wake/sleep acks and sysclk requests. */
		if (ev & (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK))
			complete(&mb0_transfer.ac_wake_work);
		if (ev & WAKEUP_BIT_SYSCLK_OK)
			complete(&mb3_transfer.sysclk_work);

		/* Only dispatch events that are enabled as DBB irqs. */
		ev &= mb0_transfer.req.dbb_irqs;

		for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) {
			if (ev & prcmu_irq_bit[n])
				generic_handle_irq(IRQ_PRCMU_BASE + n);
		}
		r = true;
		break;
	default:
		print_unknown_header_warning(0, header);
		r = false;
		break;
	}
	/* Clear the mailbox 0 interrupt. */
	writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
	return r;
}
1606
/*
 * Read the mailbox 1 acknowledge (request header plus current ARM/APE
 * OPP and APE voltage status), clear the irq and complete the waiter.
 */
static bool read_mailbox_1(void)
{
	mb1_transfer.ack.header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB1);
	mb1_transfer.ack.arm_opp = readb(tcdm_base +
		PRCM_ACK_MB1_CURRENT_ARM_OPP);
	mb1_transfer.ack.ape_opp = readb(tcdm_base +
		PRCM_ACK_MB1_CURRENT_APE_OPP);
	mb1_transfer.ack.ape_voltage_status = readb(tcdm_base +
		PRCM_ACK_MB1_APE_VOLTAGE_STATUS);
	writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
	complete(&mb1_transfer.work);
	/* No threaded handling required. */
	return false;
}
1620
/* Read the mailbox 2 acknowledge (DPS status), clear the irq, complete. */
static bool read_mailbox_2(void)
{
	mb2_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB2_DPS_STATUS);
	writel(MBOX_BIT(2), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
	complete(&mb2_transfer.work);
	return false;
}
1628
/* Mailbox 3 acknowledge: nothing to read, just clear the irq. */
static bool read_mailbox_3(void)
{
	writel(MBOX_BIT(3), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
	return false;
}
1634
/*
 * Handle a mailbox 4 acknowledge.  The waiter is only completed for
 * known request headers; unknown headers are logged and ignored.
 */
static bool read_mailbox_4(void)
{
	u8 header;
	bool do_complete = true;

	header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB4);
	switch (header) {
	case MB4H_MEM_ST:
	case MB4H_HOTDOG:
	case MB4H_HOTMON:
	case MB4H_HOT_PERIOD:
		break;
	default:
		print_unknown_header_warning(4, header);
		do_complete = false;
		break;
	}

	/* Always clear the irq, even for unknown headers. */
	writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));

	if (do_complete)
		complete(&mb4_transfer.work);

	return false;
}
1660
/* Read the mailbox 5 acknowledge (I2C status/value), clear the irq, complete. */
static bool read_mailbox_5(void)
{
	mb5_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB5_I2C_STATUS);
	mb5_transfer.ack.value = readb(tcdm_base + PRCM_ACK_MB5_I2C_VAL);
	writel(MBOX_BIT(5), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
	complete(&mb5_transfer.work);
	return false;
}
1669
/* Mailbox 6 acknowledge: nothing to read, just clear the irq. */
static bool read_mailbox_6(void)
{
	writel(MBOX_BIT(6), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
	return false;
}
1675
/* Mailbox 7 acknowledge: nothing to read, just clear the irq. */
static bool read_mailbox_7(void)
{
	writel(MBOX_BIT(7), (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
	return false;
}
1681
/* Acknowledge handlers, indexed by mailbox number (0-7). */
static bool (* const read_mailbox[NUM_MB])(void) = {
	read_mailbox_0,
	read_mailbox_1,
	read_mailbox_2,
	read_mailbox_3,
	read_mailbox_4,
	read_mailbox_5,
	read_mailbox_6,
	read_mailbox_7
};
1692
1693static irqreturn_t prcmu_irq_handler(int irq, void *data)
1694{
1695 u32 bits;
1696 u8 n;
1697 irqreturn_t r;
1698
1699 bits = (readl(_PRCMU_BASE + PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS);
1700 if (unlikely(!bits))
1701 return IRQ_NONE;
1702
1703 r = IRQ_HANDLED;
1704 for (n = 0; bits; n++) {
1705 if (bits & MBOX_BIT(n)) {
1706 bits -= MBOX_BIT(n);
1707 if (read_mailbox[n]())
1708 r = IRQ_WAKE_THREAD;
1709 }
1710 }
1711 return r;
1712}
1713
/* Threaded part of the PRCMU irq: acknowledge the DBB wakeup. */
static irqreturn_t prcmu_irq_thread_fn(int irq, void *data)
{
	ack_dbb_wakeup();
	return IRQ_HANDLED;
}
1719
/* Deferred work: push the current wakeup configuration to the PRCMU. */
static void prcmu_mask_work(struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	config_wakeups();

	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}
1730
/* irq_chip callback: clear the DBB irq bit for this virtual interrupt. */
static void prcmu_irq_mask(struct irq_data *d)
{
	unsigned long flags;

	spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);

	mb0_transfer.req.dbb_irqs &= ~prcmu_irq_bit[d->irq - IRQ_PRCMU_BASE];

	spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);

	/*
	 * Applying the change is deferred to a work item;
	 * IRQ_PRCMU_CA_SLEEP is deliberately excluded (prcmu_irq_unmask
	 * does the same).
	 */
	if (d->irq != IRQ_PRCMU_CA_SLEEP)
		schedule_work(&mb0_transfer.mask_work);
}
1744
/* irq_chip callback: set the DBB irq bit for this virtual interrupt. */
static void prcmu_irq_unmask(struct irq_data *d)
{
	unsigned long flags;

	spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);

	mb0_transfer.req.dbb_irqs |= prcmu_irq_bit[d->irq - IRQ_PRCMU_BASE];

	spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);

	/* Defer applying the change; CA_SLEEP is excluded as in prcmu_irq_mask. */
	if (d->irq != IRQ_PRCMU_CA_SLEEP)
		schedule_work(&mb0_transfer.mask_work);
}
1758
/* Empty irq_chip callback, used where no action is required (irq_ack). */
static void noop(struct irq_data *d)
{
}
1762
/* irq_chip for the virtual PRCMU wakeup interrupts. */
static struct irq_chip prcmu_irq_chip = {
	.name = "prcmu",
	.irq_disable = prcmu_irq_mask,
	.irq_ack = noop,
	.irq_mask = prcmu_irq_mask,
	.irq_unmask = prcmu_irq_unmask,
};
1770
/*
 * Early initialization: map the TCDM for the detected chip version, read
 * out and log the firmware version (v2 only), initialize all mailbox
 * locks/completions and register the virtual PRCMU wakeup irqs.
 */
void __init prcmu_early_init(void)
{
	unsigned int i;

	if (cpu_is_u8500v1()) {
		tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE_V1);
	} else if (cpu_is_u8500v2()) {
		void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K);

		if (tcpm_base != NULL) {
			int version;
			/* Firmware version word: project | api | func | errata. */
			version = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET);
			prcmu_version.project_number = version & 0xFF;
			prcmu_version.api_version = (version >> 8) & 0xFF;
			prcmu_version.func_version = (version >> 16) & 0xFF;
			prcmu_version.errata = (version >> 24) & 0xFF;
			pr_info("PRCMU firmware version %d.%d.%d\n",
				(version >> 8) & 0xFF, (version >> 16) & 0xFF,
				(version >> 24) & 0xFF);
			iounmap(tcpm_base);
		}

		tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
	} else {
		pr_err("prcmu: Unsupported chip version\n");
		BUG();
	}

	spin_lock_init(&mb0_transfer.lock);
	spin_lock_init(&mb0_transfer.dbb_irqs_lock);
	mutex_init(&mb0_transfer.ac_wake_lock);
	init_completion(&mb0_transfer.ac_wake_work);
	mutex_init(&mb1_transfer.lock);
	init_completion(&mb1_transfer.work);
	mutex_init(&mb2_transfer.lock);
	init_completion(&mb2_transfer.work);
	spin_lock_init(&mb2_transfer.auto_pm_lock);
	spin_lock_init(&mb3_transfer.lock);
	mutex_init(&mb3_transfer.sysclk_lock);
	init_completion(&mb3_transfer.sysclk_work);
	mutex_init(&mb4_transfer.lock);
	init_completion(&mb4_transfer.work);
	mutex_init(&mb5_transfer.lock);
	init_completion(&mb5_transfer.work);

	INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);

	/* Initialize irqs. */
	for (i = 0; i < NUM_PRCMU_WAKEUPS; i++) {
		unsigned int irq;

		irq = IRQ_PRCMU_BASE + i;
		irq_set_chip_and_handler(irq, &prcmu_irq_chip,
					 handle_simple_irq);
		set_irq_flags(irq, IRQF_VALID);
	}
}
1828
/*
 * Power domain switches (ePODs) modeled as regulators for the DB8500 SoC
 */
/* Consumers of the VAPE supply. */
static struct regulator_consumer_supply db8500_vape_consumers[] = {
	REGULATOR_SUPPLY("v-ape", NULL),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.0"),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"),
	/* "v-mmc" changed to "vcore" in the mainline kernel */
	REGULATOR_SUPPLY("vcore", "sdi0"),
	REGULATOR_SUPPLY("vcore", "sdi1"),
	REGULATOR_SUPPLY("vcore", "sdi2"),
	REGULATOR_SUPPLY("vcore", "sdi3"),
	REGULATOR_SUPPLY("vcore", "sdi4"),
	REGULATOR_SUPPLY("v-dma", "dma40.0"),
	REGULATOR_SUPPLY("v-ape", "ab8500-usb.0"),
	/* "v-uart" changed to "vcore" in the mainline kernel */
	REGULATOR_SUPPLY("vcore", "uart0"),
	REGULATOR_SUPPLY("vcore", "uart1"),
	REGULATOR_SUPPLY("vcore", "uart2"),
	REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"),
};
1852
/* Consumers of the VSMPS2 (1.8 V) supply. */
static struct regulator_consumer_supply db8500_vsmps2_consumers[] = {
	/* CG2900 and CW1200 power to off-chip peripherals */
	REGULATOR_SUPPLY("gbf_1v8", "cg2900-uart.0"),
	REGULATOR_SUPPLY("wlan_1v8", "cw1200.0"),
	REGULATOR_SUPPLY("musb_1v8", "ab8500-usb.0"),
	/* AV8100 regulator */
	REGULATOR_SUPPLY("hdmi_1v8", "0-0070"),
};
1861
/* Consumers of the shared B2R2/MCDE ePOD switch. */
static struct regulator_consumer_supply db8500_b2r2_mcde_consumers[] = {
	REGULATOR_SUPPLY("vsupply", "b2r2.0"),
	REGULATOR_SUPPLY("vsupply", "mcde.0"),
};
1866
/*
 * Regulator init data for the DB8500 power domains and ePOD switches,
 * indexed by the DB8500_REGULATOR_* ids.  Every entry only permits
 * enable/disable (REGULATOR_CHANGE_STATUS); switches powered from VAPE
 * name "db8500-vape" as their supply_regulator.
 */
static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
	[DB8500_REGULATOR_VAPE] = {
		.constraints = {
			.name = "db8500-vape",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_vape_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_vape_consumers),
	},
	[DB8500_REGULATOR_VARM] = {
		.constraints = {
			.name = "db8500-varm",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VMODEM] = {
		.constraints = {
			.name = "db8500-vmodem",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VPLL] = {
		.constraints = {
			.name = "db8500-vpll",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VSMPS1] = {
		.constraints = {
			.name = "db8500-vsmps1",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VSMPS2] = {
		.constraints = {
			.name = "db8500-vsmps2",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_vsmps2_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_vsmps2_consumers),
	},
	[DB8500_REGULATOR_VSMPS3] = {
		.constraints = {
			.name = "db8500-vsmps3",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VRF1] = {
		.constraints = {
			.name = "db8500-vrf1",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SVAMMDSP] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-sva-mmdsp",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SVAMMDSPRET] = {
		.constraints = {
			/* "ret" means "retention" */
			.name = "db8500-sva-mmdsp-ret",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SVAPIPE] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-sva-pipe",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SIAMMDSP] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-sia-mmdsp",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SIAMMDSPRET] = {
		.constraints = {
			.name = "db8500-sia-mmdsp-ret",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SIAPIPE] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-sia-pipe",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SGA] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-sga",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_B2R2_MCDE] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-b2r2-mcde",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_b2r2_mcde_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_b2r2_mcde_consumers),
	},
	[DB8500_REGULATOR_SWITCH_ESRAM12] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-esram12",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_ESRAM12RET] = {
		.constraints = {
			.name = "db8500-esram12-ret",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_ESRAM34] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-esram34",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_ESRAM34RET] = {
		.constraints = {
			.name = "db8500-esram34-ret",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
};
2004
/* MFD subdevices spawned by the PRCMU driver. */
static struct mfd_cell db8500_prcmu_devs[] = {
	{
		.name = "db8500-prcmu-regulators",
		.mfd_data = &db8500_regulators,
	},
	{
		.name = "cpufreq-u8500",
	},
};
2014
2015/**
2016 * prcmu_fw_init - arch init call for the Linux PRCMU fw init logic
2017 *
2018 */
2019static int __init db8500_prcmu_probe(struct platform_device *pdev)
2020{
2021 int err = 0;
2022
2023 if (ux500_is_svp())
2024 return -ENODEV;
2025
2026 /* Clean up the mailbox interrupts after pre-kernel code. */
2027 writel(ALL_MBOX_BITS, (_PRCMU_BASE + PRCM_ARM_IT1_CLR));
2028
2029 err = request_threaded_irq(IRQ_DB8500_PRCMU1, prcmu_irq_handler,
2030 prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
2031 if (err < 0) {
2032 pr_err("prcmu: Failed to allocate IRQ_DB8500_PRCMU1.\n");
2033 err = -EBUSY;
2034 goto no_irq_return;
2035 }
2036
2037 if (cpu_is_u8500v20_or_later())
2038 prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
2039
2040 err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
2041 ARRAY_SIZE(db8500_prcmu_devs), NULL,
2042 0);
2043
2044 if (err)
2045 pr_err("prcmu: Failed to add subdevices\n");
2046 else
2047 pr_info("DB8500 PRCMU initialized\n");
2048
2049no_irq_return:
2050 return err;
2051}
2052
/* Platform driver, bound via platform_driver_probe() at arch_initcall time. */
static struct platform_driver db8500_prcmu_driver = {
	.driver = {
		.name = "db8500-prcmu",
		.owner = THIS_MODULE,
	},
};
2059
/* Register the driver and probe the single db8500-prcmu device. */
static int __init db8500_prcmu_init(void)
{
	return platform_driver_probe(&db8500_prcmu_driver, db8500_prcmu_probe);
}
2064
/* Register early (arch_initcall level). */
arch_initcall(db8500_prcmu_init);

MODULE_AUTHOR("Mattias Nilsson <mattias.i.nilsson@stericsson.com>");
MODULE_DESCRIPTION("DB8500 PRCM Unit driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 209fbb70619b..776a478e6296 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_ATL2) += atlx/
31obj-$(CONFIG_ATL1E) += atl1e/ 31obj-$(CONFIG_ATL1E) += atl1e/
32obj-$(CONFIG_ATL1C) += atl1c/ 32obj-$(CONFIG_ATL1C) += atl1c/
33obj-$(CONFIG_GIANFAR) += gianfar_driver.o 33obj-$(CONFIG_GIANFAR) += gianfar_driver.o
34obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
34obj-$(CONFIG_TEHUTI) += tehuti.o 35obj-$(CONFIG_TEHUTI) += tehuti.o
35obj-$(CONFIG_ENIC) += enic/ 36obj-$(CONFIG_ENIC) += enic/
36obj-$(CONFIG_JME) += jme.o 37obj-$(CONFIG_JME) += jme.o
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 9eb9b98a7ae3..de51e8453c13 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -30,9 +30,12 @@
30#include <linux/etherdevice.h> 30#include <linux/etherdevice.h>
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/net_tstamp.h>
33#include <linux/phy.h> 34#include <linux/phy.h>
34#include <linux/platform_device.h> 35#include <linux/platform_device.h>
36#include <linux/ptp_classify.h>
35#include <linux/slab.h> 37#include <linux/slab.h>
38#include <mach/ixp46x_ts.h>
36#include <mach/npe.h> 39#include <mach/npe.h>
37#include <mach/qmgr.h> 40#include <mach/qmgr.h>
38 41
@@ -67,6 +70,10 @@
67#define RXFREE_QUEUE(port_id) (NPE_ID(port_id) + 26) 70#define RXFREE_QUEUE(port_id) (NPE_ID(port_id) + 26)
68#define TXDONE_QUEUE 31 71#define TXDONE_QUEUE 31
69 72
73#define PTP_SLAVE_MODE 1
74#define PTP_MASTER_MODE 2
75#define PORT2CHANNEL(p) NPE_ID(p->id)
76
70/* TX Control Registers */ 77/* TX Control Registers */
71#define TX_CNTRL0_TX_EN 0x01 78#define TX_CNTRL0_TX_EN 0x01
72#define TX_CNTRL0_HALFDUPLEX 0x02 79#define TX_CNTRL0_HALFDUPLEX 0x02
@@ -171,6 +178,8 @@ struct port {
171 int id; /* logical port ID */ 178 int id; /* logical port ID */
172 int speed, duplex; 179 int speed, duplex;
173 u8 firmware[4]; 180 u8 firmware[4];
181 int hwts_tx_en;
182 int hwts_rx_en;
174}; 183};
175 184
176/* NPE message structure */ 185/* NPE message structure */
@@ -246,6 +255,172 @@ static int ports_open;
246static struct port *npe_port_tab[MAX_NPES]; 255static struct port *npe_port_tab[MAX_NPES];
247static struct dma_pool *dma_pool; 256static struct dma_pool *dma_pool;
248 257
/* BPF classifier used to recognize PTP packets (see ptp_classify.h). */
static struct sock_filter ptp_filter[] = {
	PTP_FILTER
};
261
/*
 * Check whether @skb is a PTPv1-over-IPv4 packet whose source UUID and
 * sequence id match the values latched by the time stamping unit.
 * Returns non-zero on a match.
 *
 * NOTE(review): the caller passes byte-swapped register values while
 * the packet fields are converted to host order here - confirm the
 * intended byte ordering of @uid_hi/@uid_lo/@seqid.
 */
static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
	u8 *data = skb->data;
	unsigned int offset;
	u16 *hi, *id;
	u32 lo;

	if (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)
		return 0;

	offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;

	/* Packet must be long enough to contain the sequence id field. */
	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
		return 0;

	hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
	id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

	/* The low 32 UUID bits follow the high 16 bits; copy to avoid
	 * an unaligned 32 bit load. */
	memcpy(&lo, &hi[1], sizeof(lo));

	return (uid_hi == ntohs(*hi) &&
		uid_lo == ntohl(lo) &&
		seqid == ntohs(*id));
}
286
/*
 * Attach a hardware receive time stamp to @skb if the IXP46x time
 * stamping unit has latched a snapshot for this very packet.
 */
static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ixp46x_ts_regs *regs;
	u64 ns;
	u32 ch, hi, lo, val;
	u16 uid, seq;

	if (!port->hwts_rx_en)
		return;

	ch = PORT2CHANNEL(port);

	regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	val = __raw_readl(&regs->channel[ch].ch_event);

	/* No receive snapshot captured; nothing to do. */
	if (!(val & RX_SNAPSHOT_LOCKED))
		return;

	lo = __raw_readl(&regs->channel[ch].src_uuid_lo);
	hi = __raw_readl(&regs->channel[ch].src_uuid_hi);

	/* High register packs the top UUID bits and the sequence id. */
	uid = hi & 0xffff;
	seq = (hi >> 16) & 0xffff;

	/* Only use the snapshot if it belongs to this packet. */
	if (!ixp_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
		goto out;

	lo = __raw_readl(&regs->channel[ch].rx_snap_lo);
	hi = __raw_readl(&regs->channel[ch].rx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
	/* Release the snapshot so the unit can capture the next packet. */
	__raw_writel(RX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}
328
/*
 * Poll for and report the hardware transmit time stamp of @skb, when
 * hardware Tx time stamping is enabled and requested for this packet.
 */
static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct ixp46x_ts_regs *regs;
	struct skb_shared_info *shtx;
	u64 ns;
	u32 ch, cnt, hi, lo, val;

	shtx = skb_shinfo(skb);
	if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en))
		shtx->tx_flags |= SKBTX_IN_PROGRESS;
	else
		return;

	ch = PORT2CHANNEL(port);

	regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	/*
	 * This really stinks, but we have to poll for the Tx time stamp.
	 * Usually, the time stamp is ready after 4 to 6 microseconds.
	 */
	for (cnt = 0; cnt < 100; cnt++) {
		val = __raw_readl(&regs->channel[ch].ch_event);
		if (val & TX_SNAPSHOT_LOCKED)
			break;
		udelay(1);
	}
	if (!(val & TX_SNAPSHOT_LOCKED)) {
		/* Timed out: report no hardware time stamp for this skb. */
		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
		return;
	}

	lo = __raw_readl(&regs->channel[ch].tx_snap_lo);
	hi = __raw_readl(&regs->channel[ch].tx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);

	/* Release the snapshot for the next transmit. */
	__raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}
374
/*
 * SIOCSHWTSTAMP handler: configure hardware Tx/Rx time stamping for
 * this port and write the accepted config back to user space.
 */
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config cfg;
	struct ixp46x_ts_regs *regs;
	struct port *port = netdev_priv(netdev);
	int ch;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	ch = PORT2CHANNEL(port);
	regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	switch (cfg.tx_type) {
	case HWTSTAMP_TX_OFF:
		port->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		port->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		port->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		/* Slave mode: channel control cleared. */
		port->hwts_rx_en = PTP_SLAVE_MODE;
		__raw_writel(0, &regs->channel[ch].ch_control);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		/* Master mode: set MASTER_MODE in channel control. */
		port->hwts_rx_en = PTP_MASTER_MODE;
		__raw_writel(MASTER_MODE, &regs->channel[ch].ch_control);
		break;
	default:
		return -ERANGE;
	}

	/* Clear out any old time stamps. */
	__raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
		&regs->channel[ch].ch_event);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
249 424
250static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location, 425static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
251 int write, u16 cmd) 426 int write, u16 cmd)
@@ -573,6 +748,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
573 748
574 debug_pkt(dev, "eth_poll", skb->data, skb->len); 749 debug_pkt(dev, "eth_poll", skb->data, skb->len);
575 750
751 ixp_rx_timestamp(port, skb);
576 skb->protocol = eth_type_trans(skb, dev); 752 skb->protocol = eth_type_trans(skb, dev);
577 dev->stats.rx_packets++; 753 dev->stats.rx_packets++;
578 dev->stats.rx_bytes += skb->len; 754 dev->stats.rx_bytes += skb->len;
@@ -679,14 +855,12 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
679 return NETDEV_TX_OK; 855 return NETDEV_TX_OK;
680 } 856 }
681 memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4); 857 memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
682 dev_kfree_skb(skb);
683#endif 858#endif
684 859
685 phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE); 860 phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
686 if (dma_mapping_error(&dev->dev, phys)) { 861 if (dma_mapping_error(&dev->dev, phys)) {
687#ifdef __ARMEB__
688 dev_kfree_skb(skb); 862 dev_kfree_skb(skb);
689#else 863#ifndef __ARMEB__
690 kfree(mem); 864 kfree(mem);
691#endif 865#endif
692 dev->stats.tx_dropped++; 866 dev->stats.tx_dropped++;
@@ -728,6 +902,13 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
728#if DEBUG_TX 902#if DEBUG_TX
729 printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name); 903 printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
730#endif 904#endif
905
906 ixp_tx_timestamp(port, skb);
907 skb_tx_timestamp(skb);
908
909#ifndef __ARMEB__
910 dev_kfree_skb(skb);
911#endif
731 return NETDEV_TX_OK; 912 return NETDEV_TX_OK;
732} 913}
733 914
@@ -783,6 +964,9 @@ static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
783 if (!netif_running(dev)) 964 if (!netif_running(dev))
784 return -EINVAL; 965 return -EINVAL;
785 966
967 if (cpu_is_ixp46x() && cmd == SIOCSHWTSTAMP)
968 return hwtstamp_ioctl(dev, req, cmd);
969
786 return phy_mii_ioctl(port->phydev, req, cmd); 970 return phy_mii_ioctl(port->phydev, req, cmd);
787} 971}
788 972
@@ -1171,6 +1355,11 @@ static int __devinit eth_init_one(struct platform_device *pdev)
1171 char phy_id[MII_BUS_ID_SIZE + 3]; 1355 char phy_id[MII_BUS_ID_SIZE + 3];
1172 int err; 1356 int err;
1173 1357
1358 if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
1359 pr_err("ixp4xx_eth: bad ptp filter\n");
1360 return -EINVAL;
1361 }
1362
1174 if (!(dev = alloc_etherdev(sizeof(struct port)))) 1363 if (!(dev = alloc_etherdev(sizeof(struct port))))
1175 return -ENOMEM; 1364 return -ENOMEM;
1176 1365
diff --git a/drivers/net/gianfar_ptp.c b/drivers/net/gianfar_ptp.c
new file mode 100644
index 000000000000..d8e175382d1d
--- /dev/null
+++ b/drivers/net/gianfar_ptp.c
@@ -0,0 +1,588 @@
1/*
2 * PTP 1588 clock using the eTSEC
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#include <linux/device.h>
21#include <linux/hrtimer.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/of.h>
27#include <linux/of_platform.h>
28#include <linux/timex.h>
29#include <linux/io.h>
30
31#include <linux/ptp_clock_kernel.h>
32
33#include "gianfar.h"
34
35/*
36 * gianfar ptp registers
37 * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010
38 */
39struct gianfar_ptp_registers {
40 u32 tmr_ctrl; /* Timer control register */
41 u32 tmr_tevent; /* Timestamp event register */
42 u32 tmr_temask; /* Timer event mask register */
43 u32 tmr_pevent; /* Timestamp event register */
44 u32 tmr_pemask; /* Timer event mask register */
45 u32 tmr_stat; /* Timestamp status register */
46 u32 tmr_cnt_h; /* Timer counter high register */
47 u32 tmr_cnt_l; /* Timer counter low register */
48 u32 tmr_add; /* Timer drift compensation addend register */
49 u32 tmr_acc; /* Timer accumulator register */
50 u32 tmr_prsc; /* Timer prescale */
51 u8 res1[4];
52 u32 tmroff_h; /* Timer offset high */
53 u32 tmroff_l; /* Timer offset low */
54 u8 res2[8];
55 u32 tmr_alarm1_h; /* Timer alarm 1 high register */
56 u32 tmr_alarm1_l; /* Timer alarm 1 high register */
57 u32 tmr_alarm2_h; /* Timer alarm 2 high register */
58 u32 tmr_alarm2_l; /* Timer alarm 2 high register */
59 u8 res3[48];
60 u32 tmr_fiper1; /* Timer fixed period interval */
61 u32 tmr_fiper2; /* Timer fixed period interval */
62 u32 tmr_fiper3; /* Timer fixed period interval */
63 u8 res4[20];
64 u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */
65 u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */
66 u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */
67 u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */
68};
69
70/* Bit definitions for the TMR_CTRL register */
71#define ALM1P (1<<31) /* Alarm1 output polarity */
72#define ALM2P (1<<30) /* Alarm2 output polarity */
73#define FS (1<<28) /* FIPER start indication */
74#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
75#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
76#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
77#define TCLK_PERIOD_MASK (0x3ff)
78#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
79#define FRD (1<<14) /* FIPER Realignment Disable */
80#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
81#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
82#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
83#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
84#define COPH (1<<7) /* Generated clock output phase. */
85#define CIPH (1<<6) /* External oscillator input clock phase */
86#define TMSR (1<<5) /* Timer soft reset. */
87#define BYP (1<<3) /* Bypass drift compensated clock */
88#define TE (1<<2) /* 1588 timer enable. */
89#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
90#define CKSEL_MASK (0x3)
91
92/* Bit definitions for the TMR_TEVENT register */
93#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
94#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
95#define ALM2 (1<<17) /* Current time = alarm time register 2 */
96#define ALM1 (1<<16) /* Current time = alarm time register 1 */
97#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
98#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
99#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
100
101/* Bit definitions for the TMR_TEMASK register */
102#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
103#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
104#define ALM2EN (1<<17) /* Timer ALM2 event enable */
105#define ALM1EN (1<<16) /* Timer ALM1 event enable */
106#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
107#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
108
109/* Bit definitions for the TMR_PEVENT register */
110#define TXP2 (1<<9) /* PTP transmitted timestamp im TXTS2 */
111#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
112#define RXP (1<<0) /* PTP frame has been received */
113
114/* Bit definitions for the TMR_PEMASK register */
115#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
116#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
117#define RXPEN (1<<0) /* Receive PTP packet event enable */
118
119/* Bit definitions for the TMR_STAT register */
120#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
121#define STAT_VEC_MASK (0x3f)
122
123/* Bit definitions for the TMR_PRSC register */
124#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
125#define PRSC_OCK_MASK (0xffff)
126
127
128#define DRIVER "gianfar_ptp"
129#define DEFAULT_CKSEL 1
130#define N_ALARM 1 /* first alarm is used internally to reset fipers */
131#define N_EXT_TS 2
132#define REG_SIZE sizeof(struct gianfar_ptp_registers)
133
134struct etsects {
135 struct gianfar_ptp_registers *regs;
136 spinlock_t lock; /* protects regs */
137 struct ptp_clock *clock;
138 struct ptp_clock_info caps;
139 struct resource *rsrc;
140 int irq;
141 u64 alarm_interval; /* for periodic alarm */
142 u64 alarm_value;
143 u32 tclk_period; /* nanoseconds */
144 u32 tmr_prsc;
145 u32 tmr_add;
146 u32 cksel;
147 u32 tmr_fiper1;
148 u32 tmr_fiper2;
149};
150
151/*
152 * Register access functions
153 */
154
/* Caller must hold etsects->lock. */
/* Read the 64 bit timer counter, low word first, and combine. */
static u64 tmr_cnt_read(struct etsects *etsects)
{
	u64 ns;
	u32 lo, hi;

	lo = gfar_read(&etsects->regs->tmr_cnt_l);
	hi = gfar_read(&etsects->regs->tmr_cnt_h);
	ns = ((u64) hi) << 32;
	ns |= lo;
	return ns;
}
167
/* Caller must hold etsects->lock. */
/* Write the 64 bit timer counter as two 32 bit halves, low word first. */
static void tmr_cnt_write(struct etsects *etsects, u64 ns)
{
	u32 hi = ns >> 32;
	u32 lo = ns & 0xffffffff;

	gfar_write(&etsects->regs->tmr_cnt_l, lo);
	gfar_write(&etsects->regs->tmr_cnt_h, hi);
}
177
/* Caller must hold etsects->lock. */
/*
 * Program alarm 1 (used internally to realign the fipers, see N_ALARM)
 * to the next whole second that is at least 0.5 s away, backed off by
 * one timer tick.
 */
static void set_alarm(struct etsects *etsects)
{
	u64 ns;
	u32 lo, hi;

	ns = tmr_cnt_read(etsects) + 1500000000ULL;
	/* Round down to a whole second boundary. */
	ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
	ns -= etsects->tclk_period;
	hi = ns >> 32;
	lo = ns & 0xffffffff;
	gfar_write(&etsects->regs->tmr_alarm1_l, lo);
	gfar_write(&etsects->regs->tmr_alarm1_h, hi);
}
192
/*
 * Reload the prescaler and both fiper (fixed interval period) registers
 * and rearm alarm 1.  The timer enable bit (TE) is cleared around the
 * reload and then restored.  Calls set_alarm(), which requires
 * etsects->lock, so the caller should hold the lock.
 */
static void set_fipers(struct etsects *etsects)
{
	u32 tmr_ctrl = gfar_read(&etsects->regs->tmr_ctrl);

	/* Stop the timer while reprogramming, then re-enable it. */
	gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl & (~TE));
	gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc);
	gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
	gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
	set_alarm(etsects);
	gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|TE);
}
205
/*
 * Interrupt service routine
 */

/*
 * Handles the timer event register: external trigger time stamps
 * (ETS1/ETS2), the ancillary alarm (ALM2), and the PPS event (PP1).
 * Each handled event is acknowledged by writing its bit back to
 * TMR_TEVENT at the end.
 */
static irqreturn_t isr(int irq, void *priv)
{
	struct etsects *etsects = priv;
	struct ptp_clock_event event;
	u64 ns;
	u32 ack = 0, lo, hi, mask, val;

	val = gfar_read(&etsects->regs->tmr_tevent);

	/* External trigger 1 time stamp captured. */
	if (val & ETS1) {
		ack |= ETS1;
		hi = gfar_read(&etsects->regs->tmr_etts1_h);
		lo = gfar_read(&etsects->regs->tmr_etts1_l);
		event.type = PTP_CLOCK_EXTTS;
		event.index = 0;
		event.timestamp = ((u64) hi) << 32;
		event.timestamp |= lo;
		ptp_clock_event(etsects->clock, &event);
	}

	/* External trigger 2 time stamp captured. */
	if (val & ETS2) {
		ack |= ETS2;
		hi = gfar_read(&etsects->regs->tmr_etts2_h);
		lo = gfar_read(&etsects->regs->tmr_etts2_l);
		event.type = PTP_CLOCK_EXTTS;
		event.index = 1;
		event.timestamp = ((u64) hi) << 32;
		event.timestamp |= lo;
		ptp_clock_event(etsects->clock, &event);
	}

	/* Alarm 2 implements the user-visible alarm feature
	 * (alarm 1 is reserved for fiper resynchronization). */
	if (val & ALM2) {
		ack |= ALM2;
		if (etsects->alarm_value) {
			event.type = PTP_CLOCK_ALARM;
			event.index = 0;
			event.timestamp = etsects->alarm_value;
			ptp_clock_event(etsects->clock, &event);
		}
		if (etsects->alarm_interval) {
			/* Periodic alarm: schedule the next expiration. */
			ns = etsects->alarm_value + etsects->alarm_interval;
			hi = ns >> 32;
			lo = ns & 0xffffffff;
			spin_lock(&etsects->lock);
			gfar_write(&etsects->regs->tmr_alarm2_l, lo);
			gfar_write(&etsects->regs->tmr_alarm2_h, hi);
			spin_unlock(&etsects->lock);
			etsects->alarm_value = ns;
		} else {
			/* One shot alarm: acknowledge and mask ALM2. */
			gfar_write(&etsects->regs->tmr_tevent, ALM2);
			spin_lock(&etsects->lock);
			mask = gfar_read(&etsects->regs->tmr_temask);
			mask &= ~ALM2EN;
			gfar_write(&etsects->regs->tmr_temask, mask);
			spin_unlock(&etsects->lock);
			etsects->alarm_value = 0;
			etsects->alarm_interval = 0;
		}
	}

	/* Pulse per second event. */
	if (val & PP1) {
		ack |= PP1;
		event.type = PTP_CLOCK_PPS;
		ptp_clock_event(etsects->clock, &event);
	}

	if (ack) {
		/* Acknowledge every event we handled. */
		gfar_write(&etsects->regs->tmr_tevent, ack);
		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}
282
283/*
284 * PTP clock operations
285 */
286
287static int ptp_gianfar_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
288{
289 u64 adj;
290 u32 diff, tmr_add;
291 int neg_adj = 0;
292 struct etsects *etsects = container_of(ptp, struct etsects, caps);
293
294 if (ppb < 0) {
295 neg_adj = 1;
296 ppb = -ppb;
297 }
298 tmr_add = etsects->tmr_add;
299 adj = tmr_add;
300 adj *= ppb;
301 diff = div_u64(adj, 1000000000ULL);
302
303 tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
304
305 gfar_write(&etsects->regs->tmr_add, tmr_add);
306
307 return 0;
308}
309
310static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
311{
312 s64 now;
313 unsigned long flags;
314 struct etsects *etsects = container_of(ptp, struct etsects, caps);
315
316 spin_lock_irqsave(&etsects->lock, flags);
317
318 now = tmr_cnt_read(etsects);
319 now += delta;
320 tmr_cnt_write(etsects, now);
321
322 spin_unlock_irqrestore(&etsects->lock, flags);
323
324 set_fipers(etsects);
325
326 return 0;
327}
328
329static int ptp_gianfar_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
330{
331 u64 ns;
332 u32 remainder;
333 unsigned long flags;
334 struct etsects *etsects = container_of(ptp, struct etsects, caps);
335
336 spin_lock_irqsave(&etsects->lock, flags);
337
338 ns = tmr_cnt_read(etsects);
339
340 spin_unlock_irqrestore(&etsects->lock, flags);
341
342 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
343 ts->tv_nsec = remainder;
344 return 0;
345}
346
347static int ptp_gianfar_settime(struct ptp_clock_info *ptp,
348 const struct timespec *ts)
349{
350 u64 ns;
351 unsigned long flags;
352 struct etsects *etsects = container_of(ptp, struct etsects, caps);
353
354 ns = ts->tv_sec * 1000000000ULL;
355 ns += ts->tv_nsec;
356
357 spin_lock_irqsave(&etsects->lock, flags);
358
359 tmr_cnt_write(etsects, ns);
360 set_fipers(etsects);
361
362 spin_unlock_irqrestore(&etsects->lock, flags);
363
364 return 0;
365}
366
/*
 * Enable or disable an ancillary feature: external time stamp channels
 * (ETS1/ETS2) or the PPS event.  Works by setting or clearing the
 * corresponding interrupt enable bit in TMR_TEMASK.
 */
static int ptp_gianfar_enable(struct ptp_clock_info *ptp,
			      struct ptp_clock_request *rq, int on)
{
	struct etsects *etsects = container_of(ptp, struct etsects, caps);
	unsigned long flags;
	u32 bit, mask;

	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		/* Map the requested channel onto its enable bit. */
		switch (rq->extts.index) {
		case 0:
			bit = ETS1EN;
			break;
		case 1:
			bit = ETS2EN;
			break;
		default:
			return -EINVAL;
		}
		/* Read-modify-write of the event mask under the lock. */
		spin_lock_irqsave(&etsects->lock, flags);
		mask = gfar_read(&etsects->regs->tmr_temask);
		if (on)
			mask |= bit;
		else
			mask &= ~bit;
		gfar_write(&etsects->regs->tmr_temask, mask);
		spin_unlock_irqrestore(&etsects->lock, flags);
		return 0;

	case PTP_CLK_REQ_PPS:
		spin_lock_irqsave(&etsects->lock, flags);
		mask = gfar_read(&etsects->regs->tmr_temask);
		if (on)
			mask |= PP1EN;
		else
			mask &= ~PP1EN;
		gfar_write(&etsects->regs->tmr_temask, mask);
		spin_unlock_irqrestore(&etsects->lock, flags);
		return 0;

	default:
		break;
	}

	/* Periodic outputs and other requests are not supported. */
	return -EOPNOTSUPP;
}
413
/*
 * Capability template copied into each etsects instance by probe();
 * max_adj is overwritten from the "fsl,max-adj" device tree property.
 */
static struct ptp_clock_info ptp_gianfar_caps = {
	.owner		= THIS_MODULE,
	.name		= "gianfar clock",
	.max_adj	= 512000,
	.n_alarm	= N_ALARM,
	.n_ext_ts	= N_EXT_TS,
	.n_per_out	= 0,	/* no programmable periodic outputs */
	.pps		= 1,	/* PPS events supported, see isr() */
	.adjfreq	= ptp_gianfar_adjfreq,
	.adjtime	= ptp_gianfar_adjtime,
	.gettime	= ptp_gianfar_gettime,
	.settime	= ptp_gianfar_settime,
	.enable		= ptp_gianfar_enable,
};
428
429/* OF device tree */
430
431static int get_of_u32(struct device_node *node, char *str, u32 *val)
432{
433 int plen;
434 const u32 *prop = of_get_property(node, str, &plen);
435
436 if (!prop || plen != sizeof(*prop))
437 return -1;
438 *val = *prop;
439 return 0;
440}
441
442static int gianfar_ptp_probe(struct platform_device *dev)
443{
444 struct device_node *node = dev->dev.of_node;
445 struct etsects *etsects;
446 struct timespec now;
447 int err = -ENOMEM;
448 u32 tmr_ctrl;
449 unsigned long flags;
450
451 etsects = kzalloc(sizeof(*etsects), GFP_KERNEL);
452 if (!etsects)
453 goto no_memory;
454
455 err = -ENODEV;
456
457 etsects->caps = ptp_gianfar_caps;
458 etsects->cksel = DEFAULT_CKSEL;
459
460 if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) ||
461 get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) ||
462 get_of_u32(node, "fsl,tmr-add", &etsects->tmr_add) ||
463 get_of_u32(node, "fsl,tmr-fiper1", &etsects->tmr_fiper1) ||
464 get_of_u32(node, "fsl,tmr-fiper2", &etsects->tmr_fiper2) ||
465 get_of_u32(node, "fsl,max-adj", &etsects->caps.max_adj)) {
466 pr_err("device tree node missing required elements\n");
467 goto no_node;
468 }
469
470 etsects->irq = platform_get_irq(dev, 0);
471
472 if (etsects->irq == NO_IRQ) {
473 pr_err("irq not in device tree\n");
474 goto no_node;
475 }
476 if (request_irq(etsects->irq, isr, 0, DRIVER, etsects)) {
477 pr_err("request_irq failed\n");
478 goto no_node;
479 }
480
481 etsects->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0);
482 if (!etsects->rsrc) {
483 pr_err("no resource\n");
484 goto no_resource;
485 }
486 if (request_resource(&ioport_resource, etsects->rsrc)) {
487 pr_err("resource busy\n");
488 goto no_resource;
489 }
490
491 spin_lock_init(&etsects->lock);
492
493 etsects->regs = ioremap(etsects->rsrc->start,
494 1 + etsects->rsrc->end - etsects->rsrc->start);
495 if (!etsects->regs) {
496 pr_err("ioremap ptp registers failed\n");
497 goto no_ioremap;
498 }
499 getnstimeofday(&now);
500 ptp_gianfar_settime(&etsects->caps, &now);
501
502 tmr_ctrl =
503 (etsects->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
504 (etsects->cksel & CKSEL_MASK) << CKSEL_SHIFT;
505
506 spin_lock_irqsave(&etsects->lock, flags);
507
508 gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl);
509 gfar_write(&etsects->regs->tmr_add, etsects->tmr_add);
510 gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc);
511 gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
512 gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
513 set_alarm(etsects);
514 gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE);
515
516 spin_unlock_irqrestore(&etsects->lock, flags);
517
518 etsects->clock = ptp_clock_register(&etsects->caps);
519 if (IS_ERR(etsects->clock)) {
520 err = PTR_ERR(etsects->clock);
521 goto no_clock;
522 }
523
524 dev_set_drvdata(&dev->dev, etsects);
525
526 return 0;
527
528no_clock:
529no_ioremap:
530 release_resource(etsects->rsrc);
531no_resource:
532 free_irq(etsects->irq, etsects);
533no_node:
534 kfree(etsects);
535no_memory:
536 return err;
537}
538
/*
 * Remove: quiesce the hardware (mask all timer events, stop the timer),
 * then tear down everything probe acquired, in reverse order.
 */
static int gianfar_ptp_remove(struct platform_device *dev)
{
	struct etsects *etsects = dev_get_drvdata(&dev->dev);

	gfar_write(&etsects->regs->tmr_temask, 0);
	gfar_write(&etsects->regs->tmr_ctrl,   0);

	ptp_clock_unregister(etsects->clock);
	iounmap(etsects->regs);
	release_resource(etsects->rsrc);
	free_irq(etsects->irq, etsects);
	kfree(etsects);

	return 0;
}
554
/* Device tree compatible strings this driver binds to. */
static struct of_device_id match_table[] = {
	{ .compatible = "fsl,etsec-ptp" },
	{},
};
559
/* Platform driver glue; probe/remove do all the real work. */
static struct platform_driver gianfar_ptp_driver = {
	.driver = {
		.name		= "gianfar_ptp",
		.of_match_table	= match_table,
		.owner		= THIS_MODULE,
	},
	.probe       = gianfar_ptp_probe,
	.remove      = gianfar_ptp_remove,
};
569
/* module operations */

/* Register the platform driver; the PTP clock is created in probe. */
static int __init ptp_gianfar_init(void)
{
	return platform_driver_register(&gianfar_ptp_driver);
}

module_init(ptp_gianfar_init);
578
/* Unregister the platform driver on module unload. */
static void __exit ptp_gianfar_exit(void)
{
	platform_driver_unregister(&gianfar_ptp_driver);
}

module_exit(ptp_gianfar_exit);

MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>");
MODULE_DESCRIPTION("PTP clock using the eTSEC");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 96c95617195f..32f07f868d89 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -915,7 +915,7 @@ static void ioc3_alloc_rings(struct net_device *dev)
915 915
916 skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); 916 skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
917 if (!skb) { 917 if (!skb) {
918 show_free_areas(); 918 show_free_areas(0);
919 continue; 919 continue;
920 } 920 }
921 921
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 13bebab65d02..2333215bbb32 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_FIXED_PHY) += fixed.o
19obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o 19obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
20obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o 20obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
21obj-$(CONFIG_NATIONAL_PHY) += national.o 21obj-$(CONFIG_NATIONAL_PHY) += national.o
22obj-$(CONFIG_DP83640_PHY) += dp83640.o
22obj-$(CONFIG_STE10XP) += ste10Xp.o 23obj-$(CONFIG_STE10XP) += ste10Xp.o
23obj-$(CONFIG_MICREL_PHY) += micrel.o 24obj-$(CONFIG_MICREL_PHY) += micrel.o
24obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o 25obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
new file mode 100644
index 000000000000..b0c9522bb535
--- /dev/null
+++ b/drivers/net/phy/dp83640.c
@@ -0,0 +1,1100 @@
1/*
2 * Driver for the National Semiconductor DP83640 PHYTER
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#include <linux/ethtool.h>
21#include <linux/kernel.h>
22#include <linux/list.h>
23#include <linux/mii.h>
24#include <linux/module.h>
25#include <linux/net_tstamp.h>
26#include <linux/netdevice.h>
27#include <linux/phy.h>
28#include <linux/ptp_classify.h>
29#include <linux/ptp_clock_kernel.h>
30
31#include "dp83640_reg.h"
32
#define DP83640_PHY_ID	0x20005ce1	/* PHY identifier of the DP83640 */
#define PAGESEL		0x13		/* extended register page select */
#define LAYER4		0x02		/* time stamp UDP/IP PTP packets */
#define LAYER2		0x01		/* time stamp layer 2 PTP packets */
#define MAX_RXTS	4		/* size of the rx timestamp pool */
#define MAX_TXTS	4
#define N_EXT_TS	1		/* one external time stamp channel */
#define PSF_PTPVER	2		/* PTP version used in status frames */
#define PSF_EVNT	0x4000		/* status frame type: event */
#define PSF_RX		0x2000		/* status frame type: rx timestamp */
#define PSF_TX		0x1000		/* status frame type: tx timestamp */
#define EXT_EVENT	1
#define EXT_GPIO	1
#define CAL_EVENT	2
#define CAL_GPIO	9
#define CAL_TRIGGER	2

/* phyter seems to miss the mark by 16 ns */
#define ADJTIME_FIX	16

/* Pick the status frame byte order matching the host endianness. */
#if defined(__BIG_ENDIAN)
#define ENDIAN_FLAG	0
#elif defined(__LITTLE_ENDIAN)
#define ENDIAN_FLAG	PSF_ENDIAN
#endif

/* PTP packet classification result stashed in the skb control buffer. */
#define SKB_PTP_TYPE(__skb) (*(unsigned int *)((__skb)->cb))
60
/* Raw rx timestamp layout as read from the PHY status frame. */
struct phy_rxts {
	u16 ns_lo;   /* ns[15:0] */
	u16 ns_hi;   /* overflow[1:0], ns[29:16] */
	u16 sec_lo;  /* sec[15:0] */
	u16 sec_hi;  /* sec[31:16] */
	u16 seqid;   /* sequenceId[15:0] */
	u16 msgtype; /* messageType[3:0], hash[11:0] */
};
69
/* Raw tx/event timestamp layout as read from the PHY status frame. */
struct phy_txts {
	u16 ns_lo;   /* ns[15:0] */
	u16 ns_hi;   /* overflow[1:0], ns[29:16] */
	u16 sec_lo;  /* sec[15:0] */
	u16 sec_hi;  /* sec[31:16] */
};
76
/* Decoded rx timestamp, kept on either the rxts or rxpool list. */
struct rxts {
	struct list_head list;
	unsigned long tmo;	/* jiffies value after which entry expires */
	u64 ns;			/* timestamp in nanoseconds */
	u16 seqid;		/* PTP sequenceId, for matching packets */
	u8 msgtype;		/* PTP messageType, for matching packets */
	u16 hash;		/* 12 bit hash from the status frame */
};
85
struct dp83640_clock;

/* Per-PHY driver state; one instance per DP83640 on the bus. */
struct dp83640_private {
	struct list_head list;		/* on the clock's phylist when not chosen */
	struct dp83640_clock *clock;	/* the shared per-bus clock instance */
	struct phy_device *phydev;
	struct work_struct ts_work;
	int hwts_tx_en;			/* tx hardware time stamping enabled */
	int hwts_rx_en;			/* rx hardware time stamping enabled */
	int layer;			/* LAYER2 and/or LAYER4 */
	int version;			/* PTP version(s) to time stamp */
	/* remember state of cfg0 during calibration */
	int cfg0;
	/* remember the last event time stamp */
	struct phy_txts edata;
	/* list of rx timestamps */
	struct list_head rxts;
	struct list_head rxpool;
	struct rxts rx_pool_data[MAX_RXTS];
	/* protects above three fields from concurrent access */
	spinlock_t rx_lock;
	/* queues of incoming and outgoing packets */
	struct sk_buff_head rx_queue;
	struct sk_buff_head tx_queue;
};
111
/* One shared PTP clock per MII bus, backed by the "chosen" phyter. */
struct dp83640_clock {
	/* keeps the instance in the 'phyter_clocks' list */
	struct list_head list;
	/* we create one clock instance per MII bus */
	struct mii_bus *bus;
	/* protects extended registers from concurrent access */
	struct mutex extreg_lock;
	/* remembers which page was last selected */
	int page;
	/* our advertised capabilities */
	struct ptp_clock_info caps;
	/* protects the three fields below from concurrent access */
	struct mutex clock_lock;
	/* the one phyter from which we shall read */
	struct dp83640_private *chosen;
	/* list of the other attached phyters, not chosen */
	struct list_head phylist;
	/* reference to our PTP hardware clock */
	struct ptp_clock *ptp_clock;
};
132
/* globals */

/* -1 means "first phyter probed becomes the clock source". */
static int chosen_phy = -1;
static ushort cal_gpio = 4;

module_param(chosen_phy, int, 0444);
module_param(cal_gpio, ushort, 0444);

MODULE_PARM_DESC(chosen_phy, \
	"The address of the PHY to use for the ancillary clock features");
MODULE_PARM_DESC(cal_gpio, \
	"Which GPIO line to use for synchronizing multiple PHYs");

/* a list of clocks and a mutex to protect it */
static LIST_HEAD(phyter_clocks);
static DEFINE_MUTEX(phyter_clocks_lock);

static void rx_timestamp_work(struct work_struct *work);
151
/* extended register access functions */

/* PHY address 31 is the DP83640 broadcast address (see enable_broadcast). */
#define BROADCAST_ADDR 31

/* Write 'val' to register 'regnum' on every PHY listening on the bus. */
static inline int broadcast_write(struct mii_bus *bus, u32 regnum, u16 val)
{
	return mdiobus_write(bus, BROADCAST_ADDR, regnum, val);
}
160
161/* Caller must hold extreg_lock. */
162static int ext_read(struct phy_device *phydev, int page, u32 regnum)
163{
164 struct dp83640_private *dp83640 = phydev->priv;
165 int val;
166
167 if (dp83640->clock->page != page) {
168 broadcast_write(phydev->bus, PAGESEL, page);
169 dp83640->clock->page = page;
170 }
171 val = phy_read(phydev, regnum);
172
173 return val;
174}
175
176/* Caller must hold extreg_lock. */
177static void ext_write(int broadcast, struct phy_device *phydev,
178 int page, u32 regnum, u16 val)
179{
180 struct dp83640_private *dp83640 = phydev->priv;
181
182 if (dp83640->clock->page != page) {
183 broadcast_write(phydev->bus, PAGESEL, page);
184 dp83640->clock->page = page;
185 }
186 if (broadcast)
187 broadcast_write(phydev->bus, regnum, val);
188 else
189 phy_write(phydev, regnum, val);
190}
191
/*
 * Load a timespec into the PTP time data register (four sequential
 * 16 bit writes: ns low/high, then sec low/high) and issue 'cmd'
 * (e.g. PTP_LOAD_CLK or PTP_STEP_CLK) to apply it.
 * Caller must hold extreg_lock.
 */
static int tdr_write(int bc, struct phy_device *dev,
		     const struct timespec *ts, u16 cmd)
{
	ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec & 0xffff);/* ns[15:0]  */
	ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec >> 16);   /* ns[31:16] */
	ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_sec & 0xffff); /* sec[15:0] */
	ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_sec >> 16);    /* sec[31:16]*/

	ext_write(bc, dev, PAGE4, PTP_CTL, cmd);

	return 0;
}
205
206/* convert phy timestamps into driver timestamps */
207
208static void phy2rxts(struct phy_rxts *p, struct rxts *rxts)
209{
210 u32 sec;
211
212 sec = p->sec_lo;
213 sec |= p->sec_hi << 16;
214
215 rxts->ns = p->ns_lo;
216 rxts->ns |= (p->ns_hi & 0x3fff) << 16;
217 rxts->ns += ((u64)sec) * 1000000000ULL;
218 rxts->seqid = p->seqid;
219 rxts->msgtype = (p->msgtype >> 12) & 0xf;
220 rxts->hash = p->msgtype & 0x0fff;
221 rxts->tmo = jiffies + HZ;
222}
223
224static u64 phy2txts(struct phy_txts *p)
225{
226 u64 ns;
227 u32 sec;
228
229 sec = p->sec_lo;
230 sec |= p->sec_hi << 16;
231
232 ns = p->ns_lo;
233 ns |= (p->ns_hi & 0x3fff) << 16;
234 ns += ((u64)sec) * 1000000000ULL;
235
236 return ns;
237}
238
239/* ptp clock methods */
240
241static int ptp_dp83640_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
242{
243 struct dp83640_clock *clock =
244 container_of(ptp, struct dp83640_clock, caps);
245 struct phy_device *phydev = clock->chosen->phydev;
246 u64 rate;
247 int neg_adj = 0;
248 u16 hi, lo;
249
250 if (ppb < 0) {
251 neg_adj = 1;
252 ppb = -ppb;
253 }
254 rate = ppb;
255 rate <<= 26;
256 rate = div_u64(rate, 1953125);
257
258 hi = (rate >> 16) & PTP_RATE_HI_MASK;
259 if (neg_adj)
260 hi |= PTP_RATE_DIR;
261
262 lo = rate & 0xffff;
263
264 mutex_lock(&clock->extreg_lock);
265
266 ext_write(1, phydev, PAGE4, PTP_RATEH, hi);
267 ext_write(1, phydev, PAGE4, PTP_RATEL, lo);
268
269 mutex_unlock(&clock->extreg_lock);
270
271 return 0;
272}
273
274static int ptp_dp83640_adjtime(struct ptp_clock_info *ptp, s64 delta)
275{
276 struct dp83640_clock *clock =
277 container_of(ptp, struct dp83640_clock, caps);
278 struct phy_device *phydev = clock->chosen->phydev;
279 struct timespec ts;
280 int err;
281
282 delta += ADJTIME_FIX;
283
284 ts = ns_to_timespec(delta);
285
286 mutex_lock(&clock->extreg_lock);
287
288 err = tdr_write(1, phydev, &ts, PTP_STEP_CLK);
289
290 mutex_unlock(&clock->extreg_lock);
291
292 return err;
293}
294
/*
 * Read the current time from the chosen phyter.  Issuing PTP_RD_CLK
 * latches the time; it is then read out as four sequential 16 bit
 * words (the read order is significant).
 */
static int ptp_dp83640_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
	struct dp83640_clock *clock =
		container_of(ptp, struct dp83640_clock, caps);
	struct phy_device *phydev = clock->chosen->phydev;
	unsigned int val[4];

	mutex_lock(&clock->extreg_lock);

	ext_write(0, phydev, PAGE4, PTP_CTL, PTP_RD_CLK);

	val[0] = ext_read(phydev, PAGE4, PTP_TDR); /* ns[15:0]   */
	val[1] = ext_read(phydev, PAGE4, PTP_TDR); /* ns[31:16]  */
	val[2] = ext_read(phydev, PAGE4, PTP_TDR); /* sec[15:0]  */
	val[3] = ext_read(phydev, PAGE4, PTP_TDR); /* sec[31:16] */

	mutex_unlock(&clock->extreg_lock);

	ts->tv_nsec = val[0] | (val[1] << 16);
	ts->tv_sec  = val[2] | (val[3] << 16);

	return 0;
}
318
319static int ptp_dp83640_settime(struct ptp_clock_info *ptp,
320 const struct timespec *ts)
321{
322 struct dp83640_clock *clock =
323 container_of(ptp, struct dp83640_clock, caps);
324 struct phy_device *phydev = clock->chosen->phydev;
325 int err;
326
327 mutex_lock(&clock->extreg_lock);
328
329 err = tdr_write(1, phydev, ts, PTP_LOAD_CLK);
330
331 mutex_unlock(&clock->extreg_lock);
332
333 return err;
334}
335
/*
 * Enable/disable ancillary features.  Only external time stamps on
 * channel 0 are supported: the event unit is programmed to capture
 * rising edges on the EXT_GPIO pin when enabled.
 */
static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
			      struct ptp_clock_request *rq, int on)
{
	struct dp83640_clock *clock =
		container_of(ptp, struct dp83640_clock, caps);
	struct phy_device *phydev = clock->chosen->phydev;
	u16 evnt;

	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		if (rq->extts.index != 0)
			return -EINVAL;
		evnt = EVNT_WR | (EXT_EVENT & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
		if (on) {
			evnt |= (EXT_GPIO & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
			evnt |= EVNT_RISE;
		}
		/* NOTE(review): written without extreg_lock held, unlike the
		 * other clock methods — confirm this is intentional. */
		ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
		return 0;
	default:
		break;
	}

	return -EOPNOTSUPP;
}
361
/* Destination/source MAC addresses used to mark PHY status frames. */
static u8 status_frame_dst[6] = { 0x01, 0x1B, 0x19, 0x00, 0x00, 0x00 };
static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };
364
/*
 * Turn the PHY's status frame mechanism on or off.  Status frames carry
 * event, rx, and tx timestamps in-band; the multicast address they are
 * sent to must be added to (or removed from) the netdevice's mc list so
 * they are not filtered out.
 */
static void enable_status_frames(struct phy_device *phydev, bool on)
{
	u16 cfg0 = 0, ver;

	if (on)
		cfg0 = PSF_EVNT_EN | PSF_RXTS_EN | PSF_TXTS_EN | ENDIAN_FLAG;

	ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT;

	ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0);
	ext_write(0, phydev, PAGE6, PSF_CFG1, ver);

	if (!phydev->attached_dev) {
		pr_warning("dp83640: expected to find an attached netdevice\n");
		return;
	}

	if (on) {
		if (dev_mc_add(phydev->attached_dev, status_frame_dst))
			pr_warning("dp83640: failed to add mc address\n");
	} else {
		if (dev_mc_del(phydev->attached_dev, status_frame_dst))
			pr_warning("dp83640: failed to delete mc address\n");
	}
}
390
391static bool is_status_frame(struct sk_buff *skb, int type)
392{
393 struct ethhdr *h = eth_hdr(skb);
394
395 if (PTP_CLASS_V2_L2 == type &&
396 !memcmp(h->h_source, status_frame_src, sizeof(status_frame_src)))
397 return true;
398 else
399 return false;
400}
401
/* True once an rx timestamp entry has outlived its one second window. */
static int expired(struct rxts *rxts)
{
	return time_after(jiffies, rxts->tmo);
}
406
407/* Caller must hold rx_lock. */
408static void prune_rx_ts(struct dp83640_private *dp83640)
409{
410 struct list_head *this, *next;
411 struct rxts *rxts;
412
413 list_for_each_safe(this, next, &dp83640->rxts) {
414 rxts = list_entry(this, struct rxts, list);
415 if (expired(rxts)) {
416 list_del_init(&rxts->list);
417 list_add(&rxts->list, &dp83640->rxpool);
418 }
419 }
420}
421
422/* synchronize the phyters so they act as one clock */
423
424static void enable_broadcast(struct phy_device *phydev, int init_page, int on)
425{
426 int val;
427 phy_write(phydev, PAGESEL, 0);
428 val = phy_read(phydev, PHYCR2);
429 if (on)
430 val |= BC_WRITE;
431 else
432 val &= ~BC_WRITE;
433 phy_write(phydev, PHYCR2, val);
434 phy_write(phydev, PAGESEL, init_page);
435}
436
/*
 * Synchronize all phyters on the bus to the chosen ("master") one.
 * A shared trigger pulse on cal_gpio is time stamped by every PHY;
 * each slave is then stepped by its offset from the master's stamp.
 * Status frames are disabled during the procedure and restored after.
 */
static void recalibrate(struct dp83640_clock *clock)
{
	s64 now, diff;
	struct phy_txts event_ts;
	struct timespec ts;
	struct list_head *this;
	struct dp83640_private *tmp;
	struct phy_device *master = clock->chosen->phydev;
	u16 cfg0, evnt, ptp_trig, trigger, val;

	trigger = CAL_TRIGGER;

	mutex_lock(&clock->extreg_lock);

	/*
	 * enable broadcast, disable status frames, enable ptp clock
	 */
	list_for_each(this, &clock->phylist) {
		tmp = list_entry(this, struct dp83640_private, list);
		enable_broadcast(tmp->phydev, clock->page, 1);
		tmp->cfg0 = ext_read(tmp->phydev, PAGE5, PSF_CFG0);
		ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, 0);
		ext_write(0, tmp->phydev, PAGE4, PTP_CTL, PTP_ENABLE);
	}
	enable_broadcast(master, clock->page, 1);
	cfg0 = ext_read(master, PAGE5, PSF_CFG0);
	ext_write(0, master, PAGE5, PSF_CFG0, 0);
	ext_write(0, master, PAGE4, PTP_CTL, PTP_ENABLE);

	/*
	 * enable an event timestamp
	 */
	evnt = EVNT_WR | EVNT_RISE | EVNT_SINGLE;
	evnt |= (CAL_EVENT & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
	evnt |= (cal_gpio & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;

	list_for_each(this, &clock->phylist) {
		tmp = list_entry(this, struct dp83640_private, list);
		ext_write(0, tmp->phydev, PAGE5, PTP_EVNT, evnt);
	}
	ext_write(0, master, PAGE5, PTP_EVNT, evnt);

	/*
	 * configure a trigger
	 */
	ptp_trig = TRIG_WR | TRIG_IF_LATE | TRIG_PULSE;
	ptp_trig |= (trigger & TRIG_CSEL_MASK) << TRIG_CSEL_SHIFT;
	ptp_trig |= (cal_gpio & TRIG_GPIO_MASK) << TRIG_GPIO_SHIFT;
	ext_write(0, master, PAGE5, PTP_TRIG, ptp_trig);

	/* load trigger */
	val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;
	val |= TRIG_LOAD;
	ext_write(0, master, PAGE4, PTP_CTL, val);

	/* enable trigger */
	val &= ~TRIG_LOAD;
	val |= TRIG_EN;
	ext_write(0, master, PAGE4, PTP_CTL, val);

	/* disable trigger */
	val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT;
	val |= TRIG_DIS;
	ext_write(0, master, PAGE4, PTP_CTL, val);

	/*
	 * read out and correct offsets
	 */
	val = ext_read(master, PAGE4, PTP_STS);
	pr_info("master PTP_STS 0x%04hx", val);
	val = ext_read(master, PAGE4, PTP_ESTS);
	pr_info("master PTP_ESTS 0x%04hx", val);
	/* Event data is read out word by word via PTP_EDATA. */
	event_ts.ns_lo  = ext_read(master, PAGE4, PTP_EDATA);
	event_ts.ns_hi  = ext_read(master, PAGE4, PTP_EDATA);
	event_ts.sec_lo = ext_read(master, PAGE4, PTP_EDATA);
	event_ts.sec_hi = ext_read(master, PAGE4, PTP_EDATA);
	now = phy2txts(&event_ts);

	list_for_each(this, &clock->phylist) {
		tmp = list_entry(this, struct dp83640_private, list);
		val = ext_read(tmp->phydev, PAGE4, PTP_STS);
		pr_info("slave PTP_STS 0x%04hx", val);
		val = ext_read(tmp->phydev, PAGE4, PTP_ESTS);
		pr_info("slave PTP_ESTS 0x%04hx", val);
		event_ts.ns_lo  = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
		event_ts.ns_hi  = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
		event_ts.sec_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
		event_ts.sec_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
		diff = now - (s64) phy2txts(&event_ts);
		pr_info("slave offset %lld nanoseconds\n", diff);
		diff += ADJTIME_FIX;
		ts = ns_to_timespec(diff);
		/* Step only this slave (no broadcast). */
		tdr_write(0, tmp->phydev, &ts, PTP_STEP_CLK);
	}

	/*
	 * restore status frames
	 */
	list_for_each(this, &clock->phylist) {
		tmp = list_entry(this, struct dp83640_private, list);
		ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, tmp->cfg0);
	}
	ext_write(0, master, PAGE5, PSF_CFG0, cfg0);

	mutex_unlock(&clock->extreg_lock);
}
543
/* time stamping methods */

/*
 * Decode an event timestamp from a status frame.  The frame may carry
 * only the words that changed since the last event; the rest are taken
 * from the cached copy in dp83640->edata (hence the fall-through).
 */
static void decode_evnt(struct dp83640_private *dp83640,
			struct phy_txts *phy_txts, u16 ests)
{
	struct ptp_clock_event event;
	int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK;

	switch (words) { /* fall through in every case */
	case 3:
		dp83640->edata.sec_hi = phy_txts->sec_hi;
	case 2:
		dp83640->edata.sec_lo = phy_txts->sec_lo;
	case 1:
		dp83640->edata.ns_hi = phy_txts->ns_hi;
	case 0:
		dp83640->edata.ns_lo = phy_txts->ns_lo;
	}

	event.type = PTP_CLOCK_EXTTS;
	event.index = 0;
	event.timestamp = phy2txts(&dp83640->edata);

	ptp_clock_event(dp83640->clock->ptp_clock, &event);
}
569
/*
 * Decode an rx timestamp from a status frame and queue it on the rxts
 * list, where it waits to be matched against an incoming PTP packet.
 * Entries come from a fixed-size pool; if the pool is empty (all
 * entries in flight and none expired) the timestamp is dropped.
 */
static void decode_rxts(struct dp83640_private *dp83640,
			struct phy_rxts *phy_rxts)
{
	struct rxts *rxts;
	unsigned long flags;

	spin_lock_irqsave(&dp83640->rx_lock, flags);

	/* Reclaim stale entries first. */
	prune_rx_ts(dp83640);

	if (list_empty(&dp83640->rxpool)) {
		pr_warning("dp83640: rx timestamp pool is empty\n");
		goto out;
	}
	rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
	list_del_init(&rxts->list);
	phy2rxts(phy_rxts, rxts);
	list_add_tail(&rxts->list, &dp83640->rxts);
out:
	spin_unlock_irqrestore(&dp83640->rx_lock, flags);
}
591
592static void decode_txts(struct dp83640_private *dp83640,
593 struct phy_txts *phy_txts)
594{
595 struct skb_shared_hwtstamps shhwtstamps;
596 struct sk_buff *skb;
597 u64 ns;
598
599 /* We must already have the skb that triggered this. */
600
601 skb = skb_dequeue(&dp83640->tx_queue);
602
603 if (!skb) {
604 pr_warning("dp83640: have timestamp but tx_queue empty\n");
605 return;
606 }
607 ns = phy2txts(phy_txts);
608 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
609 shhwtstamps.hwtstamp = ns_to_ktime(ns);
610 skb_complete_tx_timestamp(skb, &shhwtstamps);
611}
612
/*
 * Walk the TLV-like payload of a status frame.  Each record starts with
 * a 16 bit word whose top nibble gives the record type (PSF_RX/TX/EVNT)
 * and whose low 12 bits carry event status; the record body follows.
 * Parsing stops at the first unrecognized or truncated record.
 */
static void decode_status_frame(struct dp83640_private *dp83640,
				struct sk_buff *skb)
{
	struct phy_rxts *phy_rxts;
	struct phy_txts *phy_txts;
	u8 *ptr;
	int len, size;
	u16 ests, type;

	/* Skip the two byte header that precedes the records. */
	ptr = skb->data + 2;

	for (len = skb_headlen(skb) - 2; len > sizeof(type); len -= size) {

		type = *(u16 *)ptr;
		ests = type & 0x0fff;
		type = type & 0xf000;
		len -= sizeof(type);
		ptr += sizeof(type);

		if (PSF_RX == type && len >= sizeof(*phy_rxts)) {

			phy_rxts = (struct phy_rxts *) ptr;
			decode_rxts(dp83640, phy_rxts);
			size = sizeof(*phy_rxts);

		} else if (PSF_TX == type && len >= sizeof(*phy_txts)) {

			phy_txts = (struct phy_txts *) ptr;
			decode_txts(dp83640, phy_txts);
			size = sizeof(*phy_txts);

		} else if (PSF_EVNT == type && len >= sizeof(*phy_txts)) {

			/* event records reuse the phy_txts layout */
			phy_txts = (struct phy_txts *) ptr;
			decode_evnt(dp83640, phy_txts, ests);
			size = sizeof(*phy_txts);

		} else {
			size = 0;
			break;
		}
		ptr += size;
	}
}
657
/*
 * Decide whether a received PTP packet corresponds to the given rx
 * timestamp, by comparing the packet's messageType and sequenceId with
 * the values recorded in the status frame.  Returns non-zero on match.
 */
static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
{
	u16 *seqid;
	unsigned int offset;
	u8 *msgtype, *data = skb_mac_header(skb);

	/* check sequenceID, messageType, 12 bit hash of offset 20-29 */

	/* Find the start of the PTP header for this encapsulation. */
	switch (type) {
	case PTP_CLASS_V1_IPV4:
	case PTP_CLASS_V2_IPV4:
		offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
		break;
	case PTP_CLASS_V1_IPV6:
	case PTP_CLASS_V2_IPV6:
		offset = OFF_PTP6;
		break;
	case PTP_CLASS_V2_L2:
		offset = ETH_HLEN;
		break;
	case PTP_CLASS_V2_VLAN:
		offset = ETH_HLEN + VLAN_HLEN;
		break;
	default:
		return 0;
	}

	/* Bail out if the packet is too short to hold a sequenceId. */
	if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
		return 0;

	/* V1 keeps the message type in the control field instead. */
	if (unlikely(type & PTP_CLASS_V1))
		msgtype = data + offset + OFF_PTP_CONTROL;
	else
		msgtype = data + offset;

	seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

	return (rxts->msgtype == (*msgtype & 0xf) &&
		rxts->seqid   == ntohs(*seqid));
}
698
/*
 * Tear down every per-bus clock instance at module unload.  By this
 * point no phyters may remain attached; a non-empty phylist indicates
 * a reference counting bug, hence the BUG().
 */
static void dp83640_free_clocks(void)
{
	struct dp83640_clock *clock;
	struct list_head *this, *next;

	mutex_lock(&phyter_clocks_lock);

	list_for_each_safe(this, next, &phyter_clocks) {
		clock = list_entry(this, struct dp83640_clock, list);
		if (!list_empty(&clock->phylist)) {
			pr_warning("phy list non-empty while unloading");
			BUG();
		}
		list_del(&clock->list);
		mutex_destroy(&clock->extreg_lock);
		mutex_destroy(&clock->clock_lock);
		/* Drop the bus reference taken in dp83640_clock_init(). */
		put_device(&clock->bus->dev);
		kfree(clock);
	}

	mutex_unlock(&phyter_clocks_lock);
}
721
/*
 * Initialize a freshly allocated per-bus clock instance: lists, locks,
 * advertised PTP capabilities, and a reference on the MII bus device.
 */
static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
{
	INIT_LIST_HEAD(&clock->list);
	clock->bus = bus;
	mutex_init(&clock->extreg_lock);
	mutex_init(&clock->clock_lock);
	INIT_LIST_HEAD(&clock->phylist);
	clock->caps.owner = THIS_MODULE;
	sprintf(clock->caps.name, "dp83640 timer");
	clock->caps.max_adj	= 1953124; /* one count below the rate limit */
	clock->caps.n_alarm	= 0;
	clock->caps.n_ext_ts	= N_EXT_TS;
	clock->caps.n_per_out	= 0;
	clock->caps.pps		= 0;
	clock->caps.adjfreq	= ptp_dp83640_adjfreq;
	clock->caps.adjtime	= ptp_dp83640_adjtime;
	clock->caps.gettime	= ptp_dp83640_gettime;
	clock->caps.settime	= ptp_dp83640_settime;
	clock->caps.enable	= ptp_dp83640_enable;
	/*
	 * Get a reference to this bus instance.
	 */
	get_device(&bus->dev);
}
746
747static int choose_this_phy(struct dp83640_clock *clock,
748 struct phy_device *phydev)
749{
750 if (chosen_phy == -1 && !clock->chosen)
751 return 1;
752
753 if (chosen_phy == phydev->addr)
754 return 1;
755
756 return 0;
757}
758
/*
 * Acquire the per-clock mutex.  Tolerates and returns NULL unchanged so
 * callers can chain it onto a lookup that may have failed.
 */
static struct dp83640_clock *dp83640_clock_get(struct dp83640_clock *clock)
{
	if (clock)
		mutex_lock(&clock->clock_lock);
	return clock;
}
765
766/*
767 * Look up and lock a clock by bus instance.
768 * If there is no clock for this bus, then create it first.
769 */
770static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
771{
772 struct dp83640_clock *clock = NULL, *tmp;
773 struct list_head *this;
774
775 mutex_lock(&phyter_clocks_lock);
776
777 list_for_each(this, &phyter_clocks) {
778 tmp = list_entry(this, struct dp83640_clock, list);
779 if (tmp->bus == bus) {
780 clock = tmp;
781 break;
782 }
783 }
784 if (clock)
785 goto out;
786
787 clock = kzalloc(sizeof(struct dp83640_clock), GFP_KERNEL);
788 if (!clock)
789 goto out;
790
791 dp83640_clock_init(clock, bus);
792 list_add_tail(&phyter_clocks, &clock->list);
793out:
794 mutex_unlock(&phyter_clocks_lock);
795
796 return dp83640_clock_get(clock);
797}
798
/* Release the per-clock mutex taken by dp83640_clock_get(). */
static void dp83640_clock_put(struct dp83640_clock *clock)
{
	mutex_unlock(&clock->clock_lock);
}
803
/*
 * Attach a PHY instance to its bus-wide clock.
 *
 * One PHY per bus provides the PTP clock (the "chosen" one, registered
 * with the PTP class); any additional PHYs are kept on the clock's
 * phylist and brought in sync via recalibrate().
 *
 * Returns 0 on success or a negative errno.
 */
static int dp83640_probe(struct phy_device *phydev)
{
	struct dp83640_clock *clock;
	struct dp83640_private *dp83640;
	int err = -ENOMEM, i;

	/* The broadcast address is used internally by the driver and
	 * never gets private state of its own. */
	if (phydev->addr == BROADCAST_ADDR)
		return 0;

	/* Look up (or create) the clock for this MII bus; returns locked. */
	clock = dp83640_clock_get_bus(phydev->bus);
	if (!clock)
		goto no_clock;

	dp83640 = kzalloc(sizeof(struct dp83640_private), GFP_KERNEL);
	if (!dp83640)
		goto no_memory;

	dp83640->phydev = phydev;
	INIT_WORK(&dp83640->ts_work, rx_timestamp_work);

	/* Seed the rx timestamp free pool from the embedded array. */
	INIT_LIST_HEAD(&dp83640->rxts);
	INIT_LIST_HEAD(&dp83640->rxpool);
	for (i = 0; i < MAX_RXTS; i++)
		list_add(&dp83640->rx_pool_data[i].list, &dp83640->rxpool);

	phydev->priv = dp83640;

	spin_lock_init(&dp83640->rx_lock);
	skb_queue_head_init(&dp83640->rx_queue);
	skb_queue_head_init(&dp83640->tx_queue);

	dp83640->clock = clock;

	if (choose_this_phy(clock, phydev)) {
		/* This PHY provides the PTP clock for the whole bus. */
		clock->chosen = dp83640;
		clock->ptp_clock = ptp_clock_register(&clock->caps);
		if (IS_ERR(clock->ptp_clock)) {
			err = PTR_ERR(clock->ptp_clock);
			goto no_register;
		}
	} else
		list_add_tail(&dp83640->list, &clock->phylist);

	if (clock->chosen && !list_empty(&clock->phylist))
		recalibrate(clock);
	else
		enable_broadcast(dp83640->phydev, clock->page, 1);

	dp83640_clock_put(clock);
	return 0;

no_register:
	clock->chosen = NULL;
	kfree(dp83640);
no_memory:
	dp83640_clock_put(clock);
no_clock:
	return err;
}
863
/*
 * Detach a PHY instance: stop status frames, flush deferred timestamp
 * work, and unhook the PHY from the bus clock -- unregistering the PTP
 * clock if this PHY was the chosen one.
 */
static void dp83640_remove(struct phy_device *phydev)
{
	struct dp83640_clock *clock;
	struct list_head *this, *next;
	struct dp83640_private *tmp, *dp83640 = phydev->priv;

	/* The broadcast address never allocated private state. */
	if (phydev->addr == BROADCAST_ADDR)
		return;

	enable_status_frames(phydev, false);
	/* Make sure ts_work cannot run after dp83640 is freed below. */
	cancel_work_sync(&dp83640->ts_work);

	clock = dp83640_clock_get(dp83640->clock);

	if (dp83640 == clock->chosen) {
		ptp_clock_unregister(clock->ptp_clock);
		clock->chosen = NULL;
	} else {
		/* Not the clock provider: drop ourselves from phylist. */
		list_for_each_safe(this, next, &clock->phylist) {
			tmp = list_entry(this, struct dp83640_private, list);
			if (tmp == dp83640) {
				list_del_init(&tmp->list);
				break;
			}
		}
	}

	dp83640_clock_put(clock);
	kfree(dp83640);
}
894
895static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
896{
897 struct dp83640_private *dp83640 = phydev->priv;
898 struct hwtstamp_config cfg;
899 u16 txcfg0, rxcfg0;
900
901 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
902 return -EFAULT;
903
904 if (cfg.flags) /* reserved for future extensions */
905 return -EINVAL;
906
907 switch (cfg.tx_type) {
908 case HWTSTAMP_TX_OFF:
909 dp83640->hwts_tx_en = 0;
910 break;
911 case HWTSTAMP_TX_ON:
912 dp83640->hwts_tx_en = 1;
913 break;
914 default:
915 return -ERANGE;
916 }
917
918 switch (cfg.rx_filter) {
919 case HWTSTAMP_FILTER_NONE:
920 dp83640->hwts_rx_en = 0;
921 dp83640->layer = 0;
922 dp83640->version = 0;
923 break;
924 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
925 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
926 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
927 dp83640->hwts_rx_en = 1;
928 dp83640->layer = LAYER4;
929 dp83640->version = 1;
930 break;
931 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
932 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
933 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
934 dp83640->hwts_rx_en = 1;
935 dp83640->layer = LAYER4;
936 dp83640->version = 2;
937 break;
938 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
939 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
940 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
941 dp83640->hwts_rx_en = 1;
942 dp83640->layer = LAYER2;
943 dp83640->version = 2;
944 break;
945 case HWTSTAMP_FILTER_PTP_V2_EVENT:
946 case HWTSTAMP_FILTER_PTP_V2_SYNC:
947 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
948 dp83640->hwts_rx_en = 1;
949 dp83640->layer = LAYER4|LAYER2;
950 dp83640->version = 2;
951 break;
952 default:
953 return -ERANGE;
954 }
955
956 txcfg0 = (dp83640->version & TX_PTP_VER_MASK) << TX_PTP_VER_SHIFT;
957 rxcfg0 = (dp83640->version & TX_PTP_VER_MASK) << TX_PTP_VER_SHIFT;
958
959 if (dp83640->layer & LAYER2) {
960 txcfg0 |= TX_L2_EN;
961 rxcfg0 |= RX_L2_EN;
962 }
963 if (dp83640->layer & LAYER4) {
964 txcfg0 |= TX_IPV6_EN | TX_IPV4_EN;
965 rxcfg0 |= RX_IPV6_EN | RX_IPV4_EN;
966 }
967
968 if (dp83640->hwts_tx_en)
969 txcfg0 |= TX_TS_EN;
970
971 if (dp83640->hwts_rx_en)
972 rxcfg0 |= RX_TS_EN;
973
974 mutex_lock(&dp83640->clock->extreg_lock);
975
976 if (dp83640->hwts_tx_en || dp83640->hwts_rx_en) {
977 enable_status_frames(phydev, true);
978 ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
979 }
980
981 ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0);
982 ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0);
983
984 mutex_unlock(&dp83640->clock->extreg_lock);
985
986 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
987}
988
/*
 * Work handler: deliver each deferred receive skb, attaching a hardware
 * timestamp when a matching rxts record exists, then prune records that
 * have expired without ever matching a packet.
 */
static void rx_timestamp_work(struct work_struct *work)
{
	struct dp83640_private *dp83640 =
		container_of(work, struct dp83640_private, ts_work);
	struct list_head *this, *next;
	struct rxts *rxts;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct sk_buff *skb;
	unsigned int type;
	unsigned long flags;

	/* Deliver each deferred packet, with or without a time stamp. */

	while ((skb = skb_dequeue(&dp83640->rx_queue)) != NULL) {
		type = SKB_PTP_TYPE(skb);
		/* rx_lock protects both the rxts and rxpool lists. */
		spin_lock_irqsave(&dp83640->rx_lock, flags);
		list_for_each_safe(this, next, &dp83640->rxts) {
			rxts = list_entry(this, struct rxts, list);
			if (match(skb, type, rxts)) {
				shhwtstamps = skb_hwtstamps(skb);
				memset(shhwtstamps, 0, sizeof(*shhwtstamps));
				shhwtstamps->hwtstamp = ns_to_ktime(rxts->ns);
				/* Recycle the consumed record into the pool. */
				list_del_init(&rxts->list);
				list_add(&rxts->list, &dp83640->rxpool);
				break;
			}
		}
		spin_unlock_irqrestore(&dp83640->rx_lock, flags);
		netif_rx(skb);
	}

	/* Clear out expired time stamps. */

	spin_lock_irqsave(&dp83640->rx_lock, flags);
	prune_rx_ts(dp83640);
	spin_unlock_irqrestore(&dp83640->rx_lock, flags);
}
1026
1027static bool dp83640_rxtstamp(struct phy_device *phydev,
1028 struct sk_buff *skb, int type)
1029{
1030 struct dp83640_private *dp83640 = phydev->priv;
1031
1032 if (!dp83640->hwts_rx_en)
1033 return false;
1034
1035 if (is_status_frame(skb, type)) {
1036 decode_status_frame(dp83640, skb);
1037 /* Let the stack drop this frame. */
1038 return false;
1039 }
1040
1041 SKB_PTP_TYPE(skb) = type;
1042 skb_queue_tail(&dp83640->rx_queue, skb);
1043 schedule_work(&dp83640->ts_work);
1044
1045 return true;
1046}
1047
1048static void dp83640_txtstamp(struct phy_device *phydev,
1049 struct sk_buff *skb, int type)
1050{
1051 struct dp83640_private *dp83640 = phydev->priv;
1052
1053 if (!dp83640->hwts_tx_en) {
1054 kfree_skb(skb);
1055 return;
1056 }
1057 skb_queue_tail(&dp83640->tx_queue, skb);
1058 schedule_work(&dp83640->ts_work);
1059}
1060
/* PHY driver glue: generic PHY management via the genphy helpers plus
 * the PTP time stamping hooks implemented above. */
static struct phy_driver dp83640_driver = {
	.phy_id = DP83640_PHY_ID,
	.phy_id_mask = 0xfffffff0,
	.name = "NatSemi DP83640",
	.features = PHY_BASIC_FEATURES,
	.flags = 0,
	.probe = dp83640_probe,
	.remove = dp83640_remove,
	.config_aneg = genphy_config_aneg,
	.read_status = genphy_read_status,
	.hwtstamp = dp83640_hwtstamp,
	.rxtstamp = dp83640_rxtstamp,
	.txtstamp = dp83640_txtstamp,
	.driver = {.owner = THIS_MODULE,}
};
1076
/* Module entry point: register the PHY driver with phylib. */
static int __init dp83640_init(void)
{
	return phy_driver_register(&dp83640_driver);
}
1081
/* Module exit point: release all per-bus clocks, then unregister. */
static void __exit dp83640_exit(void)
{
	dp83640_free_clocks();
	phy_driver_unregister(&dp83640_driver);
}
1087
MODULE_DESCRIPTION("National Semiconductor DP83640 PHY driver");
MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>");
MODULE_LICENSE("GPL");

module_init(dp83640_init);
module_exit(dp83640_exit);

/* MDIO device IDs handled by this driver, exported for autoloading. */
static struct mdio_device_id __maybe_unused dp83640_tbl[] = {
	{ DP83640_PHY_ID, 0xfffffff0 },
	{ }
};

MODULE_DEVICE_TABLE(mdio, dp83640_tbl);
diff --git a/drivers/net/phy/dp83640_reg.h b/drivers/net/phy/dp83640_reg.h
new file mode 100644
index 000000000000..e7fe41117003
--- /dev/null
+++ b/drivers/net/phy/dp83640_reg.h
@@ -0,0 +1,267 @@
1/* dp83640_reg.h
2 * Generated by regen.tcl on Thu Feb 17 10:02:48 AM CET 2011
3 */
4#ifndef HAVE_DP83640_REGISTERS
5#define HAVE_DP83640_REGISTERS
6
7#define PAGE0 0x0000
8#define PHYCR2 0x001c /* PHY Control Register 2 */
9
10#define PAGE4 0x0004
11#define PTP_CTL 0x0014 /* PTP Control Register */
12#define PTP_TDR 0x0015 /* PTP Time Data Register */
13#define PTP_STS 0x0016 /* PTP Status Register */
14#define PTP_TSTS 0x0017 /* PTP Trigger Status Register */
15#define PTP_RATEL 0x0018 /* PTP Rate Low Register */
16#define PTP_RATEH 0x0019 /* PTP Rate High Register */
17#define PTP_RDCKSUM 0x001a /* PTP Read Checksum */
18#define PTP_WRCKSUM 0x001b /* PTP Write Checksum */
19#define PTP_TXTS 0x001c /* PTP Transmit Timestamp Register, in four 16-bit reads */
20#define PTP_RXTS 0x001d /* PTP Receive Timestamp Register, in six? 16-bit reads */
21#define PTP_ESTS 0x001e /* PTP Event Status Register */
22#define PTP_EDATA 0x001f /* PTP Event Data Register */
23
24#define PAGE5 0x0005
25#define PTP_TRIG 0x0014 /* PTP Trigger Configuration Register */
26#define PTP_EVNT 0x0015 /* PTP Event Configuration Register */
27#define PTP_TXCFG0 0x0016 /* PTP Transmit Configuration Register 0 */
28#define PTP_TXCFG1 0x0017 /* PTP Transmit Configuration Register 1 */
29#define PSF_CFG0 0x0018 /* PHY Status Frame Configuration Register 0 */
30#define PTP_RXCFG0 0x0019 /* PTP Receive Configuration Register 0 */
31#define PTP_RXCFG1 0x001a /* PTP Receive Configuration Register 1 */
32#define PTP_RXCFG2 0x001b /* PTP Receive Configuration Register 2 */
33#define PTP_RXCFG3 0x001c /* PTP Receive Configuration Register 3 */
34#define PTP_RXCFG4 0x001d /* PTP Receive Configuration Register 4 */
35#define PTP_TRDL 0x001e /* PTP Temporary Rate Duration Low Register */
36#define PTP_TRDH 0x001f /* PTP Temporary Rate Duration High Register */
37
38#define PAGE6 0x0006
39#define PTP_COC 0x0014 /* PTP Clock Output Control Register */
40#define PSF_CFG1 0x0015 /* PHY Status Frame Configuration Register 1 */
41#define PSF_CFG2 0x0016 /* PHY Status Frame Configuration Register 2 */
42#define PSF_CFG3 0x0017 /* PHY Status Frame Configuration Register 3 */
43#define PSF_CFG4 0x0018 /* PHY Status Frame Configuration Register 4 */
44#define PTP_SFDCFG 0x0019 /* PTP SFD Configuration Register */
45#define PTP_INTCTL 0x001a /* PTP Interrupt Control Register */
46#define PTP_CLKSRC 0x001b /* PTP Clock Source Register */
47#define PTP_ETR 0x001c /* PTP Ethernet Type Register */
48#define PTP_OFF 0x001d /* PTP Offset Register */
49#define PTP_GPIOMON 0x001e /* PTP GPIO Monitor Register */
50#define PTP_RXHASH 0x001f /* PTP Receive Hash Register */
51
52/* Bit definitions for the PHYCR2 register */
53#define BC_WRITE (1<<11) /* Broadcast Write Enable */
54
55/* Bit definitions for the PTP_CTL register */
56#define TRIG_SEL_SHIFT (10) /* PTP Trigger Select */
57#define TRIG_SEL_MASK (0x7)
58#define TRIG_DIS (1<<9) /* Disable PTP Trigger */
59#define TRIG_EN (1<<8) /* Enable PTP Trigger */
60#define TRIG_READ (1<<7) /* Read PTP Trigger */
61#define TRIG_LOAD (1<<6) /* Load PTP Trigger */
62#define PTP_RD_CLK (1<<5) /* Read PTP Clock */
63#define PTP_LOAD_CLK (1<<4) /* Load PTP Clock */
64#define PTP_STEP_CLK (1<<3) /* Step PTP Clock */
65#define PTP_ENABLE (1<<2) /* Enable PTP Clock */
66#define PTP_DISABLE (1<<1) /* Disable PTP Clock */
67#define PTP_RESET (1<<0) /* Reset PTP Clock */
68
69/* Bit definitions for the PTP_STS register */
70#define TXTS_RDY (1<<11) /* Transmit Timestamp Ready */
71#define RXTS_RDY (1<<10) /* Receive Timestamp Ready */
72#define TRIG_DONE (1<<9) /* PTP Trigger Done */
73#define EVENT_RDY (1<<8) /* PTP Event Timestamp Ready */
74#define TXTS_IE (1<<3) /* Transmit Timestamp Interrupt Enable */
75#define RXTS_IE (1<<2) /* Receive Timestamp Interrupt Enable */
76#define TRIG_IE (1<<1) /* Trigger Interrupt Enable */
77#define EVENT_IE (1<<0) /* Event Interrupt Enable */
78
79/* Bit definitions for the PTP_TSTS register */
80#define TRIG7_ERROR (1<<15) /* Trigger 7 Error */
81#define TRIG7_ACTIVE (1<<14) /* Trigger 7 Active */
82#define TRIG6_ERROR (1<<13) /* Trigger 6 Error */
83#define TRIG6_ACTIVE (1<<12) /* Trigger 6 Active */
84#define TRIG5_ERROR (1<<11) /* Trigger 5 Error */
85#define TRIG5_ACTIVE (1<<10) /* Trigger 5 Active */
86#define TRIG4_ERROR (1<<9) /* Trigger 4 Error */
87#define TRIG4_ACTIVE (1<<8) /* Trigger 4 Active */
88#define TRIG3_ERROR (1<<7) /* Trigger 3 Error */
89#define TRIG3_ACTIVE (1<<6) /* Trigger 3 Active */
90#define TRIG2_ERROR (1<<5) /* Trigger 2 Error */
91#define TRIG2_ACTIVE (1<<4) /* Trigger 2 Active */
92#define TRIG1_ERROR (1<<3) /* Trigger 1 Error */
93#define TRIG1_ACTIVE (1<<2) /* Trigger 1 Active */
94#define TRIG0_ERROR (1<<1) /* Trigger 0 Error */
95#define TRIG0_ACTIVE (1<<0) /* Trigger 0 Active */
96
97/* Bit definitions for the PTP_RATEH register */
98#define PTP_RATE_DIR (1<<15) /* PTP Rate Direction */
99#define PTP_TMP_RATE (1<<14) /* PTP Temporary Rate */
100#define PTP_RATE_HI_SHIFT (0) /* PTP Rate High 10-bits */
101#define PTP_RATE_HI_MASK (0x3ff)
102
103/* Bit definitions for the PTP_ESTS register */
104#define EVNTS_MISSED_SHIFT (8) /* Indicates number of events missed */
105#define EVNTS_MISSED_MASK (0x7)
106#define EVNT_TS_LEN_SHIFT (6) /* Indicates length of the Timestamp field in 16-bit words minus 1 */
107#define EVNT_TS_LEN_MASK (0x3)
108#define EVNT_RF (1<<5) /* Indicates whether the event is a rise or falling event */
109#define EVNT_NUM_SHIFT (2) /* Indicates Event Timestamp Unit which detected an event */
110#define EVNT_NUM_MASK (0x7)
111#define MULT_EVNT (1<<1) /* Indicates multiple events were detected at the same time */
112#define EVENT_DET (1<<0) /* PTP Event Detected */
113
114/* Bit definitions for the PTP_EDATA register */
115#define E7_RISE (1<<15) /* Indicates direction of Event 7 */
116#define E7_DET (1<<14) /* Indicates Event 7 detected */
117#define E6_RISE (1<<13) /* Indicates direction of Event 6 */
118#define E6_DET (1<<12) /* Indicates Event 6 detected */
119#define E5_RISE (1<<11) /* Indicates direction of Event 5 */
120#define E5_DET (1<<10) /* Indicates Event 5 detected */
121#define E4_RISE (1<<9) /* Indicates direction of Event 4 */
122#define E4_DET (1<<8) /* Indicates Event 4 detected */
123#define E3_RISE (1<<7) /* Indicates direction of Event 3 */
124#define E3_DET (1<<6) /* Indicates Event 3 detected */
125#define E2_RISE (1<<5) /* Indicates direction of Event 2 */
126#define E2_DET (1<<4) /* Indicates Event 2 detected */
127#define E1_RISE (1<<3) /* Indicates direction of Event 1 */
128#define E1_DET (1<<2) /* Indicates Event 1 detected */
129#define E0_RISE (1<<1) /* Indicates direction of Event 0 */
130#define E0_DET (1<<0) /* Indicates Event 0 detected */
131
132/* Bit definitions for the PTP_TRIG register */
133#define TRIG_PULSE (1<<15) /* generate a Pulse rather than a single edge */
134#define TRIG_PER (1<<14) /* generate a periodic signal */
135#define TRIG_IF_LATE (1<<13) /* trigger immediately if already past */
136#define TRIG_NOTIFY (1<<12) /* Trigger Notification Enable */
137#define TRIG_GPIO_SHIFT (8) /* Trigger GPIO Connection, value 1-12 */
138#define TRIG_GPIO_MASK (0xf)
139#define TRIG_TOGGLE (1<<7) /* Trigger Toggle Mode Enable */
140#define TRIG_CSEL_SHIFT (1) /* Trigger Configuration Select */
141#define TRIG_CSEL_MASK (0x7)
142#define TRIG_WR (1<<0) /* Trigger Configuration Write */
143
144/* Bit definitions for the PTP_EVNT register */
145#define EVNT_RISE (1<<14) /* Event Rise Detect Enable */
146#define EVNT_FALL (1<<13) /* Event Fall Detect Enable */
147#define EVNT_SINGLE (1<<12) /* enable single event capture operation */
148#define EVNT_GPIO_SHIFT (8) /* Event GPIO Connection, value 1-12 */
149#define EVNT_GPIO_MASK (0xf)
150#define EVNT_SEL_SHIFT (1) /* Event Select */
151#define EVNT_SEL_MASK (0x7)
152#define EVNT_WR (1<<0) /* Event Configuration Write */
153
154/* Bit definitions for the PTP_TXCFG0 register */
155#define SYNC_1STEP (1<<15) /* insert timestamp into transmit Sync Messages */
156#define DR_INSERT (1<<13) /* Insert Delay_Req Timestamp in Delay_Resp (dangerous) */
157#define NTP_TS_EN (1<<12) /* Enable Timestamping of NTP Packets */
158#define IGNORE_2STEP (1<<11) /* Ignore Two_Step flag for One-Step operation */
159#define CRC_1STEP (1<<10) /* Disable checking of CRC for One-Step operation */
160#define CHK_1STEP (1<<9) /* Enable UDP Checksum correction for One-Step Operation */
161#define IP1588_EN (1<<8) /* Enable IEEE 1588 defined IP address filter */
162#define TX_L2_EN (1<<7) /* Layer2 Timestamp Enable */
163#define TX_IPV6_EN (1<<6) /* IPv6 Timestamp Enable */
164#define TX_IPV4_EN (1<<5) /* IPv4 Timestamp Enable */
165#define TX_PTP_VER_SHIFT (1) /* Enable Timestamp capture for IEEE 1588 version X */
166#define TX_PTP_VER_MASK (0xf)
167#define TX_TS_EN (1<<0) /* Transmit Timestamp Enable */
168
169/* Bit definitions for the PTP_TXCFG1 register */
170#define BYTE0_MASK_SHIFT (8) /* Bit mask to be used for matching Byte0 of the PTP Message */
171#define BYTE0_MASK_MASK (0xff)
172#define BYTE0_DATA_SHIFT (0) /* Data to be used for matching Byte0 of the PTP Message */
173#define BYTE0_DATA_MASK (0xff)
174
175/* Bit definitions for the PSF_CFG0 register */
176#define MAC_SRC_ADD_SHIFT (11) /* Status Frame Mac Source Address */
177#define MAC_SRC_ADD_MASK (0x3)
178#define MIN_PRE_SHIFT (8) /* Status Frame Minimum Preamble */
179#define MIN_PRE_MASK (0x7)
180#define PSF_ENDIAN (1<<7) /* Status Frame Endian Control */
181#define PSF_IPV4 (1<<6) /* Status Frame IPv4 Enable */
182#define PSF_PCF_RD (1<<5) /* Control Frame Read PHY Status Frame Enable */
183#define PSF_ERR_EN (1<<4) /* Error PHY Status Frame Enable */
184#define PSF_TXTS_EN (1<<3) /* Transmit Timestamp PHY Status Frame Enable */
185#define PSF_RXTS_EN (1<<2) /* Receive Timestamp PHY Status Frame Enable */
186#define PSF_TRIG_EN (1<<1) /* Trigger PHY Status Frame Enable */
187#define PSF_EVNT_EN (1<<0) /* Event PHY Status Frame Enable */
188
189/* Bit definitions for the PTP_RXCFG0 register */
190#define DOMAIN_EN (1<<15) /* Domain Match Enable */
191#define ALT_MAST_DIS (1<<14) /* Alternate Master Timestamp Disable */
192#define USER_IP_SEL (1<<13) /* Selects portion of IP address accessible thru PTP_RXCFG2 */
193#define USER_IP_EN (1<<12) /* Enable User-programmed IP address filter */
194#define RX_SLAVE (1<<11) /* Receive Slave Only */
195#define IP1588_EN_SHIFT (8) /* Enable IEEE 1588 defined IP address filters */
196#define IP1588_EN_MASK (0xf)
197#define RX_L2_EN (1<<7) /* Layer2 Timestamp Enable */
198#define RX_IPV6_EN (1<<6) /* IPv6 Timestamp Enable */
199#define RX_IPV4_EN (1<<5) /* IPv4 Timestamp Enable */
200#define RX_PTP_VER_SHIFT (1) /* Enable Timestamp capture for IEEE 1588 version X */
201#define RX_PTP_VER_MASK (0xf)
202#define RX_TS_EN (1<<0) /* Receive Timestamp Enable */
203
204/* Bit definitions for the PTP_RXCFG1 register */
205#define BYTE0_MASK_SHIFT (8) /* Bit mask to be used for matching Byte0 of the PTP Message */
206#define BYTE0_MASK_MASK (0xff)
207#define BYTE0_DATA_SHIFT (0) /* Data to be used for matching Byte0 of the PTP Message */
208#define BYTE0_DATA_MASK (0xff)
209
210/* Bit definitions for the PTP_RXCFG3 register */
211#define TS_MIN_IFG_SHIFT (12) /* Minimum Inter-frame Gap */
212#define TS_MIN_IFG_MASK (0xf)
213#define ACC_UDP (1<<11) /* Record Timestamp if UDP Checksum Error */
214#define ACC_CRC (1<<10) /* Record Timestamp if CRC Error */
215#define TS_APPEND (1<<9) /* Append Timestamp for L2 */
216#define TS_INSERT (1<<8) /* Enable Timestamp Insertion */
217#define PTP_DOMAIN_SHIFT (0) /* PTP Message domainNumber field */
218#define PTP_DOMAIN_MASK (0xff)
219
220/* Bit definitions for the PTP_RXCFG4 register */
221#define IPV4_UDP_MOD (1<<15) /* Enable IPV4 UDP Modification */
222#define TS_SEC_EN (1<<14) /* Enable Timestamp Seconds */
223#define TS_SEC_LEN_SHIFT (12) /* Inserted Timestamp Seconds Length */
224#define TS_SEC_LEN_MASK (0x3)
225#define RXTS_NS_OFF_SHIFT (6) /* Receive Timestamp Nanoseconds offset */
226#define RXTS_NS_OFF_MASK (0x3f)
227#define RXTS_SEC_OFF_SHIFT (0) /* Receive Timestamp Seconds offset */
228#define RXTS_SEC_OFF_MASK (0x3f)
229
230/* Bit definitions for the PTP_COC register */
231#define PTP_CLKOUT_EN (1<<15) /* PTP Clock Output Enable */
232#define PTP_CLKOUT_SEL (1<<14) /* PTP Clock Output Source Select */
233#define PTP_CLKOUT_SPEEDSEL (1<<13) /* PTP Clock Output I/O Speed Select */
234#define PTP_CLKDIV_SHIFT (0) /* PTP Clock Divide-by Value */
235#define PTP_CLKDIV_MASK (0xff)
236
237/* Bit definitions for the PSF_CFG1 register */
238#define PTPRESERVED_SHIFT (12) /* PTP v2 reserved field */
239#define PTPRESERVED_MASK (0xf)
240#define VERSIONPTP_SHIFT (8) /* PTP v2 versionPTP field */
241#define VERSIONPTP_MASK (0xf)
242#define TRANSPORT_SPECIFIC_SHIFT (4) /* PTP v2 Header transportSpecific field */
243#define TRANSPORT_SPECIFIC_MASK (0xf)
244#define MESSAGETYPE_SHIFT (0) /* PTP v2 messageType field */
245#define MESSAGETYPE_MASK (0xf)
246
247/* Bit definitions for the PTP_SFDCFG register */
248#define TX_SFD_GPIO_SHIFT (4) /* TX SFD GPIO Select, value 1-12 */
249#define TX_SFD_GPIO_MASK (0xf)
250#define RX_SFD_GPIO_SHIFT (0) /* RX SFD GPIO Select, value 1-12 */
251#define RX_SFD_GPIO_MASK (0xf)
252
253/* Bit definitions for the PTP_INTCTL register */
254#define PTP_INT_GPIO_SHIFT (0) /* PTP Interrupt GPIO Select */
255#define PTP_INT_GPIO_MASK (0xf)
256
257/* Bit definitions for the PTP_CLKSRC register */
258#define CLK_SRC_SHIFT (14) /* PTP Clock Source Select */
259#define CLK_SRC_MASK (0x3)
260#define CLK_SRC_PER_SHIFT (0) /* PTP Clock Source Period */
261#define CLK_SRC_PER_MASK (0x7f)
262
263/* Bit definitions for the PTP_OFF register */
264#define PTP_OFFSET_SHIFT (0) /* PTP Message offset from preceding header */
265#define PTP_OFFSET_MASK (0xff)
266
267#endif
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c
index 1e980fdd9d77..1e2af96fc29c 100644
--- a/drivers/net/tile/tilepro.c
+++ b/drivers/net/tile/tilepro.c
@@ -1658,11 +1658,9 @@ static int tile_net_stop(struct net_device *dev)
1658 while (tile_net_lepp_free_comps(dev, true)) 1658 while (tile_net_lepp_free_comps(dev, true))
1659 /* loop */; 1659 /* loop */;
1660 1660
1661 /* Wipe the EPP queue. */ 1661 /* Wipe the EPP queue, and wait till the stores hit the EPP. */
1662 memset(priv->eq, 0, sizeof(lepp_queue_t)); 1662 memset(priv->eq, 0, sizeof(lepp_queue_t));
1663 1663 mb();
1664 /* Evict the EPP queue. */
1665 finv_buffer(priv->eq, EQ_SIZE);
1666 1664
1667 return 0; 1665 return 0;
1668} 1666}
@@ -2398,7 +2396,7 @@ static void tile_net_cleanup(void)
2398 struct net_device *dev = tile_net_devs[i]; 2396 struct net_device *dev = tile_net_devs[i];
2399 struct tile_net_priv *priv = netdev_priv(dev); 2397 struct tile_net_priv *priv = netdev_priv(dev);
2400 unregister_netdev(dev); 2398 unregister_netdev(dev);
2401 finv_buffer(priv->eq, EQ_SIZE); 2399 finv_buffer_remote(priv->eq, EQ_SIZE, 0);
2402 __free_pages(priv->eq_pages, EQ_ORDER); 2400 __free_pages(priv->eq_pages, EQ_ORDER);
2403 free_netdev(dev); 2401 free_netdev(dev);
2404 } 2402 }
diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
index 94a114aa8e28..b1396e5b2953 100644
--- a/drivers/platform/x86/ibm_rtl.c
+++ b/drivers/platform/x86/ibm_rtl.c
@@ -81,6 +81,19 @@ static void __iomem *rtl_cmd_addr;
81static u8 rtl_cmd_type; 81static u8 rtl_cmd_type;
82static u8 rtl_cmd_width; 82static u8 rtl_cmd_width;
83 83
84#ifndef readq
85static inline __u64 readq(const volatile void __iomem *addr)
86{
87 const volatile u32 __iomem *p = addr;
88 u32 low, high;
89
90 low = readl(p);
91 high = readl(p + 1);
92
93 return low + ((u64)high << 32);
94}
95#endif
96
84static void __iomem *rtl_port_map(phys_addr_t addr, unsigned long len) 97static void __iomem *rtl_port_map(phys_addr_t addr, unsigned long len)
85{ 98{
86 if (rtl_cmd_type == RTL_ADDR_TYPE_MMIO) 99 if (rtl_cmd_type == RTL_ADDR_TYPE_MMIO)
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 85c8ad43c0c5..5ffe7c398148 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -344,6 +344,19 @@ struct ips_driver {
344static bool 344static bool
345ips_gpu_turbo_enabled(struct ips_driver *ips); 345ips_gpu_turbo_enabled(struct ips_driver *ips);
346 346
347#ifndef readq
348static inline __u64 readq(const volatile void __iomem *addr)
349{
350 const volatile u32 __iomem *p = addr;
351 u32 low, high;
352
353 low = readl(p);
354 high = readl(p + 1);
355
356 return low + ((u64)high << 32);
357}
358#endif
359
347/** 360/**
348 * ips_cpu_busy - is CPU busy? 361 * ips_cpu_busy - is CPU busy?
349 * @ips: IPS driver struct 362 * @ips: IPS driver struct
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
new file mode 100644
index 000000000000..68d720102296
--- /dev/null
+++ b/drivers/ptp/Kconfig
@@ -0,0 +1,75 @@
1#
2# PTP clock support configuration
3#
4
5menu "PTP clock support"
6
7comment "Enable Device Drivers -> PPS to see the PTP clock options."
8 depends on PPS=n
9
10config PTP_1588_CLOCK
11 tristate "PTP clock support"
12 depends on EXPERIMENTAL
13 depends on PPS
14 help
15 The IEEE 1588 standard defines a method to precisely
16 synchronize distributed clocks over Ethernet networks. The
17 standard defines a Precision Time Protocol (PTP), which can
18 be used to achieve synchronization within a few dozen
19 microseconds. In addition, with the help of special hardware
20 time stamping units, it can be possible to achieve
21 synchronization to within a few hundred nanoseconds.
22
23 This driver adds support for PTP clocks as character
24 devices. If you want to use a PTP clock, then you should
25 also enable at least one clock driver as well.
26
27 To compile this driver as a module, choose M here: the module
28 will be called ptp.
29
30config PTP_1588_CLOCK_GIANFAR
31 tristate "Freescale eTSEC as PTP clock"
32 depends on PTP_1588_CLOCK
33 depends on GIANFAR
34 help
35 This driver adds support for using the eTSEC as a PTP
36 clock. This clock is only useful if your PTP programs are
37 getting hardware time stamps on the PTP Ethernet packets
38 using the SO_TIMESTAMPING API.
39
40 To compile this driver as a module, choose M here: the module
41 will be called gianfar_ptp.
42
43config PTP_1588_CLOCK_IXP46X
44 tristate "Intel IXP46x as PTP clock"
45 depends on PTP_1588_CLOCK
46 depends on IXP4XX_ETH
47 help
48 This driver adds support for using the IXP46X as a PTP
49 clock. This clock is only useful if your PTP programs are
50 getting hardware time stamps on the PTP Ethernet packets
51 using the SO_TIMESTAMPING API.
52
53 To compile this driver as a module, choose M here: the module
54 will be called ptp_ixp46x.
55
56comment "Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks."
57 depends on PTP_1588_CLOCK && (PHYLIB=n || NETWORK_PHY_TIMESTAMPING=n)
58
59config DP83640_PHY
60 tristate "Driver for the National Semiconductor DP83640 PHYTER"
61 depends on PTP_1588_CLOCK
62 depends on NETWORK_PHY_TIMESTAMPING
63 depends on PHYLIB
64 ---help---
65 Supports the DP83640 PHYTER with IEEE 1588 features.
66
67 This driver adds support for using the DP83640 as a PTP
68 clock. This clock is only useful if your PTP programs are
69 getting hardware time stamps on the PTP Ethernet packets
70 using the SO_TIMESTAMPING API.
71
72 In order for this to work, your MAC driver must also
73 implement the skb_tx_timestamp() function.
74
75endmenu
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
new file mode 100644
index 000000000000..f6933e83de72
--- /dev/null
+++ b/drivers/ptp/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for PTP 1588 clock support.
3#
4
5ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o
6obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o
7obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
new file mode 100644
index 000000000000..a8d03aeb4051
--- /dev/null
+++ b/drivers/ptp/ptp_chardev.c
@@ -0,0 +1,159 @@
1/*
2 * PTP 1588 clock support - character device implementation.
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#include <linux/module.h>
21#include <linux/posix-clock.h>
22#include <linux/poll.h>
23#include <linux/sched.h>
24
25#include "ptp_private.h"
26
/*
 * Character device open callback for a PTP clock.
 *
 * No per-open state is required, so every open is accepted.
 * @fmode is ignored; access permission is enforced by the VFS layer
 * before this callback runs.
 */
int ptp_open(struct posix_clock *pc, fmode_t fmode)
{
	return 0;
}
31
32long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
33{
34 struct ptp_clock_caps caps;
35 struct ptp_clock_request req;
36 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
37 struct ptp_clock_info *ops = ptp->info;
38 int enable, err = 0;
39
40 switch (cmd) {
41
42 case PTP_CLOCK_GETCAPS:
43 memset(&caps, 0, sizeof(caps));
44 caps.max_adj = ptp->info->max_adj;
45 caps.n_alarm = ptp->info->n_alarm;
46 caps.n_ext_ts = ptp->info->n_ext_ts;
47 caps.n_per_out = ptp->info->n_per_out;
48 caps.pps = ptp->info->pps;
49 err = copy_to_user((void __user *)arg, &caps, sizeof(caps));
50 break;
51
52 case PTP_EXTTS_REQUEST:
53 if (copy_from_user(&req.extts, (void __user *)arg,
54 sizeof(req.extts))) {
55 err = -EFAULT;
56 break;
57 }
58 if (req.extts.index >= ops->n_ext_ts) {
59 err = -EINVAL;
60 break;
61 }
62 req.type = PTP_CLK_REQ_EXTTS;
63 enable = req.extts.flags & PTP_ENABLE_FEATURE ? 1 : 0;
64 err = ops->enable(ops, &req, enable);
65 break;
66
67 case PTP_PEROUT_REQUEST:
68 if (copy_from_user(&req.perout, (void __user *)arg,
69 sizeof(req.perout))) {
70 err = -EFAULT;
71 break;
72 }
73 if (req.perout.index >= ops->n_per_out) {
74 err = -EINVAL;
75 break;
76 }
77 req.type = PTP_CLK_REQ_PEROUT;
78 enable = req.perout.period.sec || req.perout.period.nsec;
79 err = ops->enable(ops, &req, enable);
80 break;
81
82 case PTP_ENABLE_PPS:
83 if (!capable(CAP_SYS_TIME))
84 return -EPERM;
85 req.type = PTP_CLK_REQ_PPS;
86 enable = arg ? 1 : 0;
87 err = ops->enable(ops, &req, enable);
88 break;
89
90 default:
91 err = -ENOTTY;
92 break;
93 }
94 return err;
95}
96
97unsigned int ptp_poll(struct posix_clock *pc, struct file *fp, poll_table *wait)
98{
99 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
100
101 poll_wait(fp, &ptp->tsev_wq, wait);
102
103 return queue_cnt(&ptp->tsevq) ? POLLIN : 0;
104}
105
106ssize_t ptp_read(struct posix_clock *pc,
107 uint rdflags, char __user *buf, size_t cnt)
108{
109 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
110 struct timestamp_event_queue *queue = &ptp->tsevq;
111 struct ptp_extts_event event[PTP_BUF_TIMESTAMPS];
112 unsigned long flags;
113 size_t qcnt, i;
114
115 if (cnt % sizeof(struct ptp_extts_event) != 0)
116 return -EINVAL;
117
118 if (cnt > sizeof(event))
119 cnt = sizeof(event);
120
121 cnt = cnt / sizeof(struct ptp_extts_event);
122
123 if (mutex_lock_interruptible(&ptp->tsevq_mux))
124 return -ERESTARTSYS;
125
126 if (wait_event_interruptible(ptp->tsev_wq,
127 ptp->defunct || queue_cnt(queue))) {
128 mutex_unlock(&ptp->tsevq_mux);
129 return -ERESTARTSYS;
130 }
131
132 if (ptp->defunct)
133 return -ENODEV;
134
135 spin_lock_irqsave(&queue->lock, flags);
136
137 qcnt = queue_cnt(queue);
138
139 if (cnt > qcnt)
140 cnt = qcnt;
141
142 for (i = 0; i < cnt; i++) {
143 event[i] = queue->buf[queue->head];
144 queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
145 }
146
147 spin_unlock_irqrestore(&queue->lock, flags);
148
149 cnt = cnt * sizeof(struct ptp_extts_event);
150
151 mutex_unlock(&ptp->tsevq_mux);
152
153 if (copy_to_user(buf, event, cnt)) {
154 mutex_unlock(&ptp->tsevq_mux);
155 return -EFAULT;
156 }
157
158 return cnt;
159}
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
new file mode 100644
index 000000000000..cf3f9997546d
--- /dev/null
+++ b/drivers/ptp/ptp_clock.c
@@ -0,0 +1,343 @@
1/*
2 * PTP 1588 clock support
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#include <linux/bitops.h>
21#include <linux/device.h>
22#include <linux/err.h>
23#include <linux/init.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/posix-clock.h>
27#include <linux/pps_kernel.h>
28#include <linux/slab.h>
29#include <linux/syscalls.h>
30#include <linux/uaccess.h>
31
32#include "ptp_private.h"
33
34#define PTP_MAX_ALARMS 4
35#define PTP_MAX_CLOCKS 8
36#define PTP_PPS_DEFAULTS (PPS_CAPTUREASSERT | PPS_OFFSETASSERT)
37#define PTP_PPS_EVENT PPS_CAPTUREASSERT
38#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)
39
40/* private globals */
41
42static dev_t ptp_devt;
43static struct class *ptp_class;
44
45static DECLARE_BITMAP(ptp_clocks_map, PTP_MAX_CLOCKS);
46static DEFINE_MUTEX(ptp_clocks_mutex); /* protects 'ptp_clocks_map' */
47
48/* time stamp event queue operations */
49
50static inline int queue_free(struct timestamp_event_queue *q)
51{
52 return PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1;
53}
54
/*
 * Append one external time stamp event to the fifo.
 *
 * The 64 bit nanosecond time stamp from the driver event is split
 * into the seconds/nanoseconds pair of the user-visible struct
 * ptp_extts_event.  The queue spinlock is taken with irqsave since
 * drivers may deliver events from interrupt context.
 *
 * When the fifo is full, the oldest entry is dropped by advancing the
 * head, so readers always see the most recent events.
 */
static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
				       struct ptp_clock_event *src)
{
	struct ptp_extts_event *dst;
	unsigned long flags;
	s64 seconds;
	u32 remainder;

	seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);

	spin_lock_irqsave(&queue->lock, flags);

	dst = &queue->buf[queue->tail];
	dst->index = src->index;
	dst->t.sec = seconds;
	dst->t.nsec = remainder;

	/* Fifo full: overwrite the oldest event. */
	if (!queue_free(queue))
		queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;

	queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS;

	spin_unlock_irqrestore(&queue->lock, flags);
}
79
80static s32 scaled_ppm_to_ppb(long ppm)
81{
82 /*
83 * The 'freq' field in the 'struct timex' is in parts per
84 * million, but with a 16 bit binary fractional field.
85 *
86 * We want to calculate
87 *
88 * ppb = scaled_ppm * 1000 / 2^16
89 *
90 * which simplifies to
91 *
92 * ppb = scaled_ppm * 125 / 2^13
93 */
94 s64 ppb = 1 + ppm;
95 ppb *= 125;
96 ppb >>= 13;
97 return (s32) ppb;
98}
99
100/* posix clock implementation */
101
/*
 * clock_getres() for PTP clocks.
 *
 * NOTE(review): this returns 1 rather than filling in *tp; the intent
 * per the comment is "one nanosecond resolution", but a non-zero
 * return from a clock op normally signals an error — confirm against
 * the posix-clock layer's handling of this callback.
 */
static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
{
	return 1; /* always round timer functions to one nanosecond */
}
106
/* clock_settime(): delegate directly to the driver's settime op. */
static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
	return ptp->info->settime(ptp->info, tp);
}
112
/* clock_gettime(): delegate directly to the driver's gettime op. */
static int ptp_clock_gettime(struct posix_clock *pc, struct timespec *tp)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
	return ptp->info->gettime(ptp->info, tp);
}
118
/*
 * clock_adjtime() for PTP clocks.
 *
 * Supports two adjustment modes from struct timex:
 *   ADJ_SETOFFSET  - step the clock by the signed offset in tx->time
 *                    (microseconds, or nanoseconds when ADJ_NANO is
 *                    also set), via the driver's adjtime op.
 *   ADJ_FREQUENCY  - slew the clock by tx->freq (scaled ppm),
 *                    converted to ppb for the driver's adjfreq op.
 *
 * Any other mode combination yields -EOPNOTSUPP.
 */
static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
	struct ptp_clock_info *ops;
	int err = -EOPNOTSUPP;

	ops = ptp->info;

	if (tx->modes & ADJ_SETOFFSET) {
		struct timespec ts;
		ktime_t kt;
		s64 delta;

		ts.tv_sec  = tx->time.tv_sec;
		ts.tv_nsec = tx->time.tv_usec;

		/* Without ADJ_NANO the tv_usec field really is microseconds. */
		if (!(tx->modes & ADJ_NANO))
			ts.tv_nsec *= 1000;

		/* Reject out-of-range nanoseconds (also catches negatives). */
		if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
			return -EINVAL;

		kt = timespec_to_ktime(ts);
		delta = ktime_to_ns(kt);
		err = ops->adjtime(ops, delta);

	} else if (tx->modes & ADJ_FREQUENCY) {

		err = ops->adjfreq(ops, scaled_ppm_to_ppb(tx->freq));
	}

	return err;
}
152
153static struct posix_clock_operations ptp_clock_ops = {
154 .owner = THIS_MODULE,
155 .clock_adjtime = ptp_clock_adjtime,
156 .clock_gettime = ptp_clock_gettime,
157 .clock_getres = ptp_clock_getres,
158 .clock_settime = ptp_clock_settime,
159 .ioctl = ptp_ioctl,
160 .open = ptp_open,
161 .poll = ptp_poll,
162 .read = ptp_read,
163};
164
/*
 * Release callback invoked by the posix-clock layer once the last
 * reference to the clock is dropped.  Frees the per-clock state and
 * returns the clock's index to the allocation bitmap.
 */
static void delete_ptp_clock(struct posix_clock *pc)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);

	mutex_destroy(&ptp->tsevq_mux);

	/* Remove the clock from the bit map. */
	mutex_lock(&ptp_clocks_mutex);
	clear_bit(ptp->index, ptp_clocks_map);
	mutex_unlock(&ptp_clocks_mutex);

	kfree(ptp);
}
178
179/* public interface */
180
181struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info)
182{
183 struct ptp_clock *ptp;
184 int err = 0, index, major = MAJOR(ptp_devt);
185
186 if (info->n_alarm > PTP_MAX_ALARMS)
187 return ERR_PTR(-EINVAL);
188
189 /* Find a free clock slot and reserve it. */
190 err = -EBUSY;
191 mutex_lock(&ptp_clocks_mutex);
192 index = find_first_zero_bit(ptp_clocks_map, PTP_MAX_CLOCKS);
193 if (index < PTP_MAX_CLOCKS)
194 set_bit(index, ptp_clocks_map);
195 else
196 goto no_slot;
197
198 /* Initialize a clock structure. */
199 err = -ENOMEM;
200 ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
201 if (ptp == NULL)
202 goto no_memory;
203
204 ptp->clock.ops = ptp_clock_ops;
205 ptp->clock.release = delete_ptp_clock;
206 ptp->info = info;
207 ptp->devid = MKDEV(major, index);
208 ptp->index = index;
209 spin_lock_init(&ptp->tsevq.lock);
210 mutex_init(&ptp->tsevq_mux);
211 init_waitqueue_head(&ptp->tsev_wq);
212
213 /* Create a new device in our class. */
214 ptp->dev = device_create(ptp_class, NULL, ptp->devid, ptp,
215 "ptp%d", ptp->index);
216 if (IS_ERR(ptp->dev))
217 goto no_device;
218
219 dev_set_drvdata(ptp->dev, ptp);
220
221 err = ptp_populate_sysfs(ptp);
222 if (err)
223 goto no_sysfs;
224
225 /* Register a new PPS source. */
226 if (info->pps) {
227 struct pps_source_info pps;
228 memset(&pps, 0, sizeof(pps));
229 snprintf(pps.name, PPS_MAX_NAME_LEN, "ptp%d", index);
230 pps.mode = PTP_PPS_MODE;
231 pps.owner = info->owner;
232 ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
233 if (!ptp->pps_source) {
234 pr_err("failed to register pps source\n");
235 goto no_pps;
236 }
237 }
238
239 /* Create a posix clock. */
240 err = posix_clock_register(&ptp->clock, ptp->devid);
241 if (err) {
242 pr_err("failed to create posix clock\n");
243 goto no_clock;
244 }
245
246 mutex_unlock(&ptp_clocks_mutex);
247 return ptp;
248
249no_clock:
250 if (ptp->pps_source)
251 pps_unregister_source(ptp->pps_source);
252no_pps:
253 ptp_cleanup_sysfs(ptp);
254no_sysfs:
255 device_destroy(ptp_class, ptp->devid);
256no_device:
257 mutex_destroy(&ptp->tsevq_mux);
258 kfree(ptp);
259no_memory:
260 clear_bit(index, ptp_clocks_map);
261no_slot:
262 mutex_unlock(&ptp_clocks_mutex);
263 return ERR_PTR(err);
264}
265EXPORT_SYMBOL(ptp_clock_register);
266
/*
 * ptp_clock_unregister() - remove a clock registered earlier
 *
 * Marks the clock defunct so that blocked readers in ptp_read() wake
 * up and bail out, then tears down the PPS source, sysfs attributes,
 * class device, and finally the posix clock (whose release callback
 * frees the ptp_clock structure once the last reference drops).
 */
int ptp_clock_unregister(struct ptp_clock *ptp)
{
	ptp->defunct = 1;
	wake_up_interruptible(&ptp->tsev_wq);

	/* Release the clock's resources. */
	if (ptp->pps_source)
		pps_unregister_source(ptp->pps_source);
	ptp_cleanup_sysfs(ptp);
	device_destroy(ptp_class, ptp->devid);

	posix_clock_unregister(&ptp->clock);
	return 0;
}
281EXPORT_SYMBOL(ptp_clock_unregister);
282
/*
 * ptp_clock_event() - driver notification of an asynchronous clock event
 *
 * @ptp:   the clock that produced the event
 * @event: type, channel index, and (for EXTTS) nanosecond time stamp
 *
 * Alarm events are currently ignored.  External time stamps are queued
 * for readers of the character device and any pollers are woken.  PPS
 * events are forwarded to the kernel PPS subsystem.
 *
 * NOTE(review): ptp->pps_source is only set when info->pps was true at
 * registration; a driver reporting PTP_CLOCK_PPS without it would pass
 * NULL to pps_event() — this is not checked here.
 */
void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
{
	struct pps_event_time evt;

	switch (event->type) {

	case PTP_CLOCK_ALARM:
		break;

	case PTP_CLOCK_EXTTS:
		enqueue_external_timestamp(&ptp->tsevq, event);
		wake_up_interruptible(&ptp->tsev_wq);
		break;

	case PTP_CLOCK_PPS:
		pps_get_ts(&evt);
		pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL);
		break;
	}
}
303EXPORT_SYMBOL(ptp_clock_event);
304
305/* module operations */
306
/* Module teardown: destroy the class and give back the chrdev region. */
static void __exit ptp_exit(void)
{
	class_destroy(ptp_class);
	unregister_chrdev_region(ptp_devt, PTP_MAX_CLOCKS);
}
312
/*
 * Module setup: create the "ptp" device class, allocate a char device
 * region covering PTP_MAX_CLOCKS minors, and attach the read-only
 * capability attributes to every device in the class.
 *
 * Registered via subsys_initcall so clock drivers that register during
 * their own module/device init find the class already in place.
 */
static int __init ptp_init(void)
{
	int err;

	ptp_class = class_create(THIS_MODULE, "ptp");
	if (IS_ERR(ptp_class)) {
		pr_err("ptp: failed to allocate class\n");
		return PTR_ERR(ptp_class);
	}

	err = alloc_chrdev_region(&ptp_devt, 0, PTP_MAX_CLOCKS, "ptp");
	if (err < 0) {
		pr_err("ptp: failed to allocate device region\n");
		goto no_region;
	}

	/* Default attributes for all devices created in this class. */
	ptp_class->dev_attrs = ptp_dev_attrs;
	pr_info("PTP clock support registered\n");
	return 0;

no_region:
	class_destroy(ptp_class);
	return err;
}
337
338subsys_initcall(ptp_init);
339module_exit(ptp_exit);
340
341MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>");
342MODULE_DESCRIPTION("PTP clocks support");
343MODULE_LICENSE("GPL");
diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/ptp/ptp_ixp46x.c
new file mode 100644
index 000000000000..803d665b15ef
--- /dev/null
+++ b/drivers/ptp/ptp_ixp46x.c
@@ -0,0 +1,332 @@
1/*
2 * PTP 1588 clock using the IXP46X
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#include <linux/device.h>
21#include <linux/err.h>
22#include <linux/gpio.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/io.h>
26#include <linux/irq.h>
27#include <linux/kernel.h>
28#include <linux/module.h>
29
30#include <linux/ptp_clock_kernel.h>
31#include <mach/ixp46x_ts.h>
32
33#define DRIVER "ptp_ixp46x"
34#define N_EXT_TS 2
35#define MASTER_GPIO 8
36#define MASTER_IRQ 25
37#define SLAVE_GPIO 7
38#define SLAVE_IRQ 24
39
40struct ixp_clock {
41 struct ixp46x_ts_regs *regs;
42 struct ptp_clock *ptp_clock;
43 struct ptp_clock_info caps;
44 int exts0_enabled;
45 int exts1_enabled;
46};
47
48DEFINE_SPINLOCK(register_lock);
49
50/*
51 * Register access functions
52 */
53
54static u64 ixp_systime_read(struct ixp46x_ts_regs *regs)
55{
56 u64 ns;
57 u32 lo, hi;
58
59 lo = __raw_readl(&regs->systime_lo);
60 hi = __raw_readl(&regs->systime_hi);
61
62 ns = ((u64) hi) << 32;
63 ns |= lo;
64 ns <<= TICKS_NS_SHIFT;
65
66 return ns;
67}
68
69static void ixp_systime_write(struct ixp46x_ts_regs *regs, u64 ns)
70{
71 u32 hi, lo;
72
73 ns >>= TICKS_NS_SHIFT;
74 hi = ns >> 32;
75 lo = ns & 0xffffffff;
76
77 __raw_writel(lo, &regs->systime_lo);
78 __raw_writel(hi, &regs->systime_hi);
79}
80
81/*
82 * Interrupt service routine
83 */
84
/*
 * GPIO interrupt handler shared by the master and slave time stamp
 * channels.
 *
 * Reads the event register, and for each pending sample (slave =
 * channel 0, master = channel 1) whose channel is enabled, latches
 * the captured counter value, converts ticks to nanoseconds, and
 * forwards it to the PTP layer as an external time stamp.  All
 * serviced event bits are acknowledged by writing them back.
 */
static irqreturn_t isr(int irq, void *priv)
{
	struct ixp_clock *ixp_clock = priv;
	struct ixp46x_ts_regs *regs = ixp_clock->regs;
	struct ptp_clock_event event;
	u32 ack = 0, lo, hi, val;

	val = __raw_readl(&regs->event);

	if (val & TSER_SNS) {
		ack |= TSER_SNS;
		if (ixp_clock->exts0_enabled) {
			hi = __raw_readl(&regs->asms_hi);
			lo = __raw_readl(&regs->asms_lo);
			event.type = PTP_CLOCK_EXTTS;
			event.index = 0;
			event.timestamp = ((u64) hi) << 32;
			event.timestamp |= lo;
			event.timestamp <<= TICKS_NS_SHIFT;
			ptp_clock_event(ixp_clock->ptp_clock, &event);
		}
	}

	if (val & TSER_SNM) {
		ack |= TSER_SNM;
		if (ixp_clock->exts1_enabled) {
			hi = __raw_readl(&regs->amms_hi);
			lo = __raw_readl(&regs->amms_lo);
			event.type = PTP_CLOCK_EXTTS;
			event.index = 1;
			event.timestamp = ((u64) hi) << 32;
			event.timestamp |= lo;
			event.timestamp <<= TICKS_NS_SHIFT;
			ptp_clock_event(ixp_clock->ptp_clock, &event);
		}
	}

	if (val & TTIPEND)
		ack |= TTIPEND; /* this bit seems to be always set */

	if (ack) {
		/* Writing the bits back clears (acknowledges) them. */
		__raw_writel(ack, &regs->event);
		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}
131
132/*
133 * PTP clock operations
134 */
135
/*
 * Adjust the clock frequency by @ppb parts per billion.
 *
 * The adjustment is always computed from DEFAULT_ADDEND (the nominal
 * rate), not from the current addend register, so each call sets an
 * absolute frequency offset rather than a cumulative one:
 *
 *     addend = DEFAULT_ADDEND * (1 +/- ppb / 1e9)
 */
static int ptp_ixp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	u64 adj;
	u32 diff, addend;
	int neg_adj = 0;
	struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps);
	struct ixp46x_ts_regs *regs = ixp_clock->regs;

	/* Work with the magnitude; remember the sign for later. */
	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}
	addend = DEFAULT_ADDEND;
	adj = addend;
	adj *= ppb;
	diff = div_u64(adj, 1000000000ULL);

	addend = neg_adj ? addend - diff : addend + diff;

	__raw_writel(addend, &regs->addend);

	return 0;
}
159
160static int ptp_ixp_adjtime(struct ptp_clock_info *ptp, s64 delta)
161{
162 s64 now;
163 unsigned long flags;
164 struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps);
165 struct ixp46x_ts_regs *regs = ixp_clock->regs;
166
167 spin_lock_irqsave(&register_lock, flags);
168
169 now = ixp_systime_read(regs);
170 now += delta;
171 ixp_systime_write(regs, now);
172
173 spin_unlock_irqrestore(&register_lock, flags);
174
175 return 0;
176}
177
178static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
179{
180 u64 ns;
181 u32 remainder;
182 unsigned long flags;
183 struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps);
184 struct ixp46x_ts_regs *regs = ixp_clock->regs;
185
186 spin_lock_irqsave(&register_lock, flags);
187
188 ns = ixp_systime_read(regs);
189
190 spin_unlock_irqrestore(&register_lock, flags);
191
192 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
193 ts->tv_nsec = remainder;
194 return 0;
195}
196
197static int ptp_ixp_settime(struct ptp_clock_info *ptp,
198 const struct timespec *ts)
199{
200 u64 ns;
201 unsigned long flags;
202 struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps);
203 struct ixp46x_ts_regs *regs = ixp_clock->regs;
204
205 ns = ts->tv_sec * 1000000000ULL;
206 ns += ts->tv_nsec;
207
208 spin_lock_irqsave(&register_lock, flags);
209
210 ixp_systime_write(regs, ns);
211
212 spin_unlock_irqrestore(&register_lock, flags);
213
214 return 0;
215}
216
/*
 * Enable or disable clock features.  Only the two external time stamp
 * channels are supported; everything else (periodic outputs, PPS)
 * yields -EOPNOTSUPP.  The enable flags are consulted by the ISR to
 * decide whether to forward captured time stamps.
 */
static int ptp_ixp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps);

	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		switch (rq->extts.index) {
		case 0:
			ixp_clock->exts0_enabled = on ? 1 : 0;
			break;
		case 1:
			ixp_clock->exts1_enabled = on ? 1 : 0;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	default:
		break;
	}

	return -EOPNOTSUPP;
}
241
242static struct ptp_clock_info ptp_ixp_caps = {
243 .owner = THIS_MODULE,
244 .name = "IXP46X timer",
245 .max_adj = 66666655,
246 .n_ext_ts = N_EXT_TS,
247 .pps = 0,
248 .adjfreq = ptp_ixp_adjfreq,
249 .adjtime = ptp_ixp_adjtime,
250 .gettime = ptp_ixp_gettime,
251 .settime = ptp_ixp_settime,
252 .enable = ptp_ixp_enable,
253};
254
255/* module operations */
256
257static struct ixp_clock ixp_clock;
258
/*
 * Configure @gpio as a falling-edge interrupt source for the time
 * stamp ISR.
 *
 * Returns the IRQ number on success, or NO_IRQ on any failure (no
 * mapping, trigger-type rejection, or request_irq failure).  The
 * caller compares the result against the IRQ it expects for the pin.
 */
static int setup_interrupt(int gpio)
{
	int irq;

	gpio_line_config(gpio, IXP4XX_GPIO_IN);

	irq = gpio_to_irq(gpio);

	if (NO_IRQ == irq)
		return NO_IRQ;

	if (irq_set_irq_type(irq, IRQF_TRIGGER_FALLING)) {
		pr_err("cannot set trigger type for irq %d\n", irq);
		return NO_IRQ;
	}

	if (request_irq(irq, isr, 0, DRIVER, &ixp_clock)) {
		pr_err("request_irq failed for irq %d\n", irq);
		return NO_IRQ;
	}

	return irq;
}
282
/* Module teardown: release both channel IRQs, then drop the clock. */
static void __exit ptp_ixp_exit(void)
{
	free_irq(MASTER_IRQ, &ixp_clock);
	free_irq(SLAVE_IRQ, &ixp_clock);
	ptp_clock_unregister(ixp_clock.ptp_clock);
}
289
/*
 * Module setup: register the PTP clock, program the hardware to its
 * nominal rate, and attach the master and slave GPIO interrupts.
 *
 * setup_interrupt() must return exactly the IRQ number hard-wired for
 * each GPIO (MASTER_IRQ/SLAVE_IRQ); any other value is treated as
 * failure and the already-acquired resources are unwound.
 */
static int __init ptp_ixp_init(void)
{
	if (!cpu_is_ixp46x())
		return -ENODEV;

	ixp_clock.regs =
		(struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	ixp_clock.caps = ptp_ixp_caps;

	ixp_clock.ptp_clock = ptp_clock_register(&ixp_clock.caps);

	if (IS_ERR(ixp_clock.ptp_clock))
		return PTR_ERR(ixp_clock.ptp_clock);

	/* Nominal rate, dummy target time, and ack any stale event. */
	__raw_writel(DEFAULT_ADDEND, &ixp_clock.regs->addend);
	__raw_writel(1, &ixp_clock.regs->trgt_lo);
	__raw_writel(0, &ixp_clock.regs->trgt_hi);
	__raw_writel(TTIPEND, &ixp_clock.regs->event);

	if (MASTER_IRQ != setup_interrupt(MASTER_GPIO)) {
		pr_err("failed to setup gpio %d as irq\n", MASTER_GPIO);
		goto no_master;
	}
	if (SLAVE_IRQ != setup_interrupt(SLAVE_GPIO)) {
		pr_err("failed to setup gpio %d as irq\n", SLAVE_GPIO);
		goto no_slave;
	}

	return 0;
no_slave:
	free_irq(MASTER_IRQ, &ixp_clock);
no_master:
	ptp_clock_unregister(ixp_clock.ptp_clock);
	return -ENODEV;
}
326
327module_init(ptp_ixp_init);
328module_exit(ptp_ixp_exit);
329
330MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>");
331MODULE_DESCRIPTION("PTP clock using the IXP46X timer");
332MODULE_LICENSE("GPL");
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
new file mode 100644
index 000000000000..4d5b5082c3b1
--- /dev/null
+++ b/drivers/ptp/ptp_private.h
@@ -0,0 +1,92 @@
1/*
2 * PTP 1588 clock support - private declarations for the core module.
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#ifndef _PTP_PRIVATE_H_
21#define _PTP_PRIVATE_H_
22
23#include <linux/cdev.h>
24#include <linux/device.h>
25#include <linux/mutex.h>
26#include <linux/posix-clock.h>
27#include <linux/ptp_clock.h>
28#include <linux/ptp_clock_kernel.h>
29#include <linux/time.h>
30
31#define PTP_MAX_TIMESTAMPS 128
32#define PTP_BUF_TIMESTAMPS 30
33
34struct timestamp_event_queue {
35 struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS];
36 int head;
37 int tail;
38 spinlock_t lock;
39};
40
41struct ptp_clock {
42 struct posix_clock clock;
43 struct device *dev;
44 struct ptp_clock_info *info;
45 dev_t devid;
46 int index; /* index into clocks.map */
47 struct pps_device *pps_source;
48 struct timestamp_event_queue tsevq; /* simple fifo for time stamps */
49 struct mutex tsevq_mux; /* one process at a time reading the fifo */
50 wait_queue_head_t tsev_wq;
51 int defunct; /* tells readers to go away when clock is being removed */
52};
53
54/*
55 * The function queue_cnt() is safe for readers to call without
56 * holding q->lock. Readers use this function to verify that the queue
57 * is nonempty before proceeding with a dequeue operation. The fact
58 * that a writer might concurrently increment the tail does not
59 * matter, since the queue remains nonempty nonetheless.
60 */
61static inline int queue_cnt(struct timestamp_event_queue *q)
62{
63 int cnt = q->tail - q->head;
64 return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt;
65}
66
67/*
68 * see ptp_chardev.c
69 */
70
71long ptp_ioctl(struct posix_clock *pc,
72 unsigned int cmd, unsigned long arg);
73
74int ptp_open(struct posix_clock *pc, fmode_t fmode);
75
76ssize_t ptp_read(struct posix_clock *pc,
77 uint flags, char __user *buf, size_t cnt);
78
79uint ptp_poll(struct posix_clock *pc,
80 struct file *fp, poll_table *wait);
81
82/*
83 * see ptp_sysfs.c
84 */
85
86extern struct device_attribute ptp_dev_attrs[];
87
88int ptp_cleanup_sysfs(struct ptp_clock *ptp);
89
90int ptp_populate_sysfs(struct ptp_clock *ptp);
91
92#endif
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
new file mode 100644
index 000000000000..2f93926ac976
--- /dev/null
+++ b/drivers/ptp/ptp_sysfs.c
@@ -0,0 +1,230 @@
1/*
2 * PTP 1588 clock support - sysfs interface.
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#include <linux/capability.h>
21
22#include "ptp_private.h"
23
/* sysfs "clock_name": the driver-supplied name from ptp_clock_info. */
static ssize_t clock_name_show(struct device *dev,
			       struct device_attribute *attr, char *page)
{
	struct ptp_clock *ptp = dev_get_drvdata(dev);
	return snprintf(page, PAGE_SIZE-1, "%s\n", ptp->info->name);
}
30
31#define PTP_SHOW_INT(name) \
32static ssize_t name##_show(struct device *dev, \
33 struct device_attribute *attr, char *page) \
34{ \
35 struct ptp_clock *ptp = dev_get_drvdata(dev); \
36 return snprintf(page, PAGE_SIZE-1, "%d\n", ptp->info->name); \
37}
38
39PTP_SHOW_INT(max_adj);
40PTP_SHOW_INT(n_alarm);
41PTP_SHOW_INT(n_ext_ts);
42PTP_SHOW_INT(n_per_out);
43PTP_SHOW_INT(pps);
44
45#define PTP_RO_ATTR(_var, _name) { \
46 .attr = { .name = __stringify(_name), .mode = 0444 }, \
47 .show = _var##_show, \
48}
49
50struct device_attribute ptp_dev_attrs[] = {
51 PTP_RO_ATTR(clock_name, clock_name),
52 PTP_RO_ATTR(max_adj, max_adjustment),
53 PTP_RO_ATTR(n_alarm, n_alarms),
54 PTP_RO_ATTR(n_ext_ts, n_external_timestamps),
55 PTP_RO_ATTR(n_per_out, n_periodic_outputs),
56 PTP_RO_ATTR(pps, pps_available),
57 __ATTR_NULL,
58};
59
/*
 * sysfs "extts_enable": expects "<channel> <enable>" and turns the
 * given external time stamp channel on (non-zero) or off (zero) via
 * the driver's enable op.  Returns the byte count consumed on
 * success, or a negative error.
 */
static ssize_t extts_enable_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ptp_clock *ptp = dev_get_drvdata(dev);
	struct ptp_clock_info *ops = ptp->info;
	struct ptp_clock_request req = { .type = PTP_CLK_REQ_EXTTS };
	int cnt, enable;
	int err = -EINVAL;

	cnt = sscanf(buf, "%u %d", &req.extts.index, &enable);
	if (cnt != 2)
		goto out;
	if (req.extts.index >= ops->n_ext_ts)
		goto out;

	err = ops->enable(ops, &req, enable ? 1 : 0);
	if (err)
		goto out;

	return count;
out:
	return err;
}
84
/*
 * sysfs "fifo": non-blocking dequeue of one external time stamp event,
 * printed as "<channel> <sec> <nsec>".  Returns 0 bytes if the fifo is
 * empty.  tsevq_mux keeps concurrent readers from interleaving;
 * queue->lock guards against the enqueue path.
 */
static ssize_t extts_fifo_show(struct device *dev,
			       struct device_attribute *attr, char *page)
{
	struct ptp_clock *ptp = dev_get_drvdata(dev);
	struct timestamp_event_queue *queue = &ptp->tsevq;
	struct ptp_extts_event event;
	unsigned long flags;
	size_t qcnt;
	int cnt = 0;

	memset(&event, 0, sizeof(event));

	if (mutex_lock_interruptible(&ptp->tsevq_mux))
		return -ERESTARTSYS;

	spin_lock_irqsave(&queue->lock, flags);
	qcnt = queue_cnt(queue);
	if (qcnt) {
		event = queue->buf[queue->head];
		queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
	}
	spin_unlock_irqrestore(&queue->lock, flags);

	if (!qcnt)
		goto out;

	cnt = snprintf(page, PAGE_SIZE, "%u %lld %u\n",
		       event.index, event.t.sec, event.t.nsec);
out:
	mutex_unlock(&ptp->tsevq_mux);
	return cnt;
}
117
/*
 * sysfs "period": expects "<channel> <start.sec> <start.nsec>
 * <period.sec> <period.nsec>" and programs the given periodic output
 * channel.  A period of zero disables the output.  Returns the byte
 * count consumed on success, or a negative error.
 */
static ssize_t period_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct ptp_clock *ptp = dev_get_drvdata(dev);
	struct ptp_clock_info *ops = ptp->info;
	struct ptp_clock_request req = { .type = PTP_CLK_REQ_PEROUT };
	int cnt, enable, err = -EINVAL;

	cnt = sscanf(buf, "%u %lld %u %lld %u", &req.perout.index,
		     &req.perout.start.sec, &req.perout.start.nsec,
		     &req.perout.period.sec, &req.perout.period.nsec);
	if (cnt != 5)
		goto out;
	if (req.perout.index >= ops->n_per_out)
		goto out;

	/* Zero period means "turn the output off". */
	enable = req.perout.period.sec || req.perout.period.nsec;
	err = ops->enable(ops, &req, enable);
	if (err)
		goto out;

	return count;
out:
	return err;
}
144
/*
 * sysfs "pps_enable": expects a single integer; non-zero connects the
 * clock's PPS events to the kernel PPS subsystem, zero disconnects.
 * Requires CAP_SYS_TIME, mirroring the PTP_ENABLE_PPS ioctl.
 */
static ssize_t pps_enable_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ptp_clock *ptp = dev_get_drvdata(dev);
	struct ptp_clock_info *ops = ptp->info;
	struct ptp_clock_request req = { .type = PTP_CLK_REQ_PPS };
	int cnt, enable;
	int err = -EINVAL;

	if (!capable(CAP_SYS_TIME))
		return -EPERM;

	cnt = sscanf(buf, "%d", &enable);
	if (cnt != 1)
		goto out;

	err = ops->enable(ops, &req, enable ? 1 : 0);
	if (err)
		goto out;

	return count;
out:
	return err;
}
170
171static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store);
172static DEVICE_ATTR(fifo, 0444, extts_fifo_show, NULL);
173static DEVICE_ATTR(period, 0220, NULL, period_store);
174static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store);
175
/*
 * Remove the optional per-clock sysfs attributes, mirroring exactly
 * what ptp_populate_sysfs() created based on the clock's capabilities.
 */
int ptp_cleanup_sysfs(struct ptp_clock *ptp)
{
	struct device *dev = ptp->dev;
	struct ptp_clock_info *info = ptp->info;

	if (info->n_ext_ts) {
		device_remove_file(dev, &dev_attr_extts_enable);
		device_remove_file(dev, &dev_attr_fifo);
	}
	if (info->n_per_out)
		device_remove_file(dev, &dev_attr_period);

	if (info->pps)
		device_remove_file(dev, &dev_attr_pps_enable);

	return 0;
}
193
/*
 * Create the optional per-clock sysfs attributes, gated on the
 * clock's advertised capabilities: extts_enable + fifo when it has
 * external time stamp channels, period when it has periodic outputs,
 * pps_enable when it supports PPS.  On failure, every attribute
 * created so far is removed (the out-labels unwind in reverse order).
 */
int ptp_populate_sysfs(struct ptp_clock *ptp)
{
	struct device *dev = ptp->dev;
	struct ptp_clock_info *info = ptp->info;
	int err;

	if (info->n_ext_ts) {
		err = device_create_file(dev, &dev_attr_extts_enable);
		if (err)
			goto out1;
		err = device_create_file(dev, &dev_attr_fifo);
		if (err)
			goto out2;
	}
	if (info->n_per_out) {
		err = device_create_file(dev, &dev_attr_period);
		if (err)
			goto out3;
	}
	if (info->pps) {
		err = device_create_file(dev, &dev_attr_pps_enable);
		if (err)
			goto out4;
	}
	return 0;
out4:
	if (info->n_per_out)
		device_remove_file(dev, &dev_attr_period);
out3:
	if (info->n_ext_ts)
		device_remove_file(dev, &dev_attr_fifo);
out2:
	if (info->n_ext_ts)
		device_remove_file(dev, &dev_attr_extts_enable);
out1:
	return err;
}
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index b9f29e0d4295..f0b13a0d1851 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -274,6 +274,13 @@ config REGULATOR_AB8500
274 This driver supports the regulators found on the ST-Ericsson mixed 274 This driver supports the regulators found on the ST-Ericsson mixed
275 signal AB8500 PMIC 275 signal AB8500 PMIC
276 276
277config REGULATOR_DB8500_PRCMU
278 bool "ST-Ericsson DB8500 Voltage Domain Regulators"
279 depends on MFD_DB8500_PRCMU
280 help
281 This driver supports the voltage domain regulators controlled by the
282 DB8500 PRCMU
283
277config REGULATOR_TPS6586X 284config REGULATOR_TPS6586X
278 tristate "TI TPS6586X Power regulators" 285 tristate "TI TPS6586X Power regulators"
279 depends on MFD_TPS6586X 286 depends on MFD_TPS6586X
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index d72a42756778..165ff5371e9e 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -41,5 +41,6 @@ obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
41obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o 41obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
42obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o 42obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
43obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o 43obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o
44obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
44 45
45ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG 46ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
new file mode 100644
index 000000000000..1089a961616e
--- /dev/null
+++ b/drivers/regulator/db8500-prcmu.c
@@ -0,0 +1,558 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License Terms: GNU General Public License v2
5 * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
6 * Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
7 *
8 * Power domain regulators on DB8500
9 */
10
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/err.h>
14#include <linux/spinlock.h>
15#include <linux/platform_device.h>
16#include <linux/mfd/core.h>
17#include <linux/mfd/db8500-prcmu.h>
18#include <linux/regulator/driver.h>
19#include <linux/regulator/machine.h>
20#include <linux/regulator/db8500-prcmu.h>
21
/*
 * power state reference count
 *
 * Number of regulators currently requiring the "active" power state.
 * Protected by power_state_active_lock.
 */
static int power_state_active_cnt; /* will initialize to zero */
static DEFINE_SPINLOCK(power_state_active_lock);
27
28static void power_state_active_enable(void)
29{
30 unsigned long flags;
31
32 spin_lock_irqsave(&power_state_active_lock, flags);
33 power_state_active_cnt++;
34 spin_unlock_irqrestore(&power_state_active_lock, flags);
35}
36
37static int power_state_active_disable(void)
38{
39 int ret = 0;
40 unsigned long flags;
41
42 spin_lock_irqsave(&power_state_active_lock, flags);
43 if (power_state_active_cnt <= 0) {
44 pr_err("power state: unbalanced enable/disable calls\n");
45 ret = -EINVAL;
46 goto out;
47 }
48
49 power_state_active_cnt--;
50out:
51 spin_unlock_irqrestore(&power_state_active_lock, flags);
52 return ret;
53}
54
/*
 * Exported interface for CPUIdle only. This function is called when interrupts
 * are turned off. Hence, no locking.
 *
 * Returns non-zero when at least one regulator currently requires the
 * "active" power state.
 */
int power_state_active_is_enabled(void)
{
	return (power_state_active_cnt > 0);
}
63
/**
 * struct db8500_regulator_info - db8500 regulator information
 * @dev: device pointer
 * @desc: regulator description
 * @rdev: regulator device pointer
 * @is_enabled: status of the regulator
 * @epod_id: id for EPOD (power domain)
 * @is_ramret: RAM retention switch for EPOD (power domain)
 * @exclude_from_power_state: do not take an "active" power state
 *	reference when this regulator is enabled
 * @operating_point: operating point (only for vape, to be removed)
 *
 */
struct db8500_regulator_info {
	struct device *dev;
	struct regulator_desc desc;
	struct regulator_dev *rdev;
	bool is_enabled;
	u16 epod_id;
	bool is_ramret;
	bool exclude_from_power_state;
	unsigned int operating_point;
};
85
86static int db8500_regulator_enable(struct regulator_dev *rdev)
87{
88 struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
89
90 if (info == NULL)
91 return -EINVAL;
92
93 dev_vdbg(rdev_get_dev(rdev), "regulator-%s-enable\n",
94 info->desc.name);
95
96 info->is_enabled = true;
97 if (!info->exclude_from_power_state)
98 power_state_active_enable();
99
100 return 0;
101}
102
103static int db8500_regulator_disable(struct regulator_dev *rdev)
104{
105 struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
106 int ret = 0;
107
108 if (info == NULL)
109 return -EINVAL;
110
111 dev_vdbg(rdev_get_dev(rdev), "regulator-%s-disable\n",
112 info->desc.name);
113
114 info->is_enabled = false;
115 if (!info->exclude_from_power_state)
116 ret = power_state_active_disable();
117
118 return ret;
119}
120
121static int db8500_regulator_is_enabled(struct regulator_dev *rdev)
122{
123 struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
124
125 if (info == NULL)
126 return -EINVAL;
127
128 dev_vdbg(rdev_get_dev(rdev), "regulator-%s-is_enabled (is_enabled):"
129 " %i\n", info->desc.name, info->is_enabled);
130
131 return info->is_enabled;
132}
133
/* db8500 regulator operations (PRCMU-controlled voltage domains) */
static struct regulator_ops db8500_regulator_ops = {
	.enable = db8500_regulator_enable,
	.disable = db8500_regulator_disable,
	.is_enabled = db8500_regulator_is_enabled,
};
140
/*
 * EPOD control
 *
 * Software shadow of each EPOD's requested state: fully on and/or in
 * RAM retention.  Indexed by EPOD id.
 */
static bool epod_on[NUM_EPOD_ID];
static bool epod_ramret[NUM_EPOD_ID];
146
/*
 * Request an EPOD (power domain) state from the PRCMU.
 *
 * @epod_id: EPOD to act on
 * @ramret:  true to request RAM retention, false to request fully on
 *
 * For a RAM-retention request the hardware is only programmed when the
 * EPOD is not already fully on (the shadow flag is recorded either way
 * so a later disable can restore the right state).  Returns 0 or a
 * negative error from prcmu_set_epod().
 */
static int enable_epod(u16 epod_id, bool ramret)
{
	int ret;

	if (ramret) {
		if (!epod_on[epod_id]) {
			ret = prcmu_set_epod(epod_id, EPOD_STATE_RAMRET);
			if (ret < 0)
				return ret;
		}
		epod_ramret[epod_id] = true;
	} else {
		ret = prcmu_set_epod(epod_id, EPOD_STATE_ON);
		if (ret < 0)
			return ret;
		epod_on[epod_id] = true;
	}

	return 0;
}
167
168static int disable_epod(u16 epod_id, bool ramret)
169{
170 int ret;
171
172 if (ramret) {
173 if (!epod_on[epod_id]) {
174 ret = prcmu_set_epod(epod_id, EPOD_STATE_OFF);
175 if (ret < 0)
176 return ret;
177 }
178 epod_ramret[epod_id] = false;
179 } else {
180 if (epod_ramret[epod_id]) {
181 ret = prcmu_set_epod(epod_id, EPOD_STATE_RAMRET);
182 if (ret < 0)
183 return ret;
184 } else {
185 ret = prcmu_set_epod(epod_id, EPOD_STATE_OFF);
186 if (ret < 0)
187 return ret;
188 }
189 epod_on[epod_id] = false;
190 }
191
192 return 0;
193}
194
195/*
196 * Regulator switch
197 */
198static int db8500_regulator_switch_enable(struct regulator_dev *rdev)
199{
200 struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
201 int ret;
202
203 if (info == NULL)
204 return -EINVAL;
205
206 dev_vdbg(rdev_get_dev(rdev), "regulator-switch-%s-enable\n",
207 info->desc.name);
208
209 ret = enable_epod(info->epod_id, info->is_ramret);
210 if (ret < 0) {
211 dev_err(rdev_get_dev(rdev),
212 "regulator-switch-%s-enable: prcmu call failed\n",
213 info->desc.name);
214 goto out;
215 }
216
217 info->is_enabled = true;
218out:
219 return ret;
220}
221
222static int db8500_regulator_switch_disable(struct regulator_dev *rdev)
223{
224 struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
225 int ret;
226
227 if (info == NULL)
228 return -EINVAL;
229
230 dev_vdbg(rdev_get_dev(rdev), "regulator-switch-%s-disable\n",
231 info->desc.name);
232
233 ret = disable_epod(info->epod_id, info->is_ramret);
234 if (ret < 0) {
235 dev_err(rdev_get_dev(rdev),
236 "regulator_switch-%s-disable: prcmu call failed\n",
237 info->desc.name);
238 goto out;
239 }
240
241 info->is_enabled = 0;
242out:
243 return ret;
244}
245
246static int db8500_regulator_switch_is_enabled(struct regulator_dev *rdev)
247{
248 struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
249
250 if (info == NULL)
251 return -EINVAL;
252
253 dev_vdbg(rdev_get_dev(rdev),
254 "regulator-switch-%s-is_enabled (is_enabled): %i\n",
255 info->desc.name, info->is_enabled);
256
257 return info->is_enabled;
258}
259
/* Operations for the EPOD power-domain switch regulators. */
static struct regulator_ops db8500_regulator_switch_ops = {
	.enable = db8500_regulator_switch_enable,
	.disable = db8500_regulator_switch_disable,
	.is_enabled = db8500_regulator_switch_is_enabled,
};
265
/*
 * Regulator information
 *
 * Table of every regulator exposed by this driver, indexed by the
 * DB8500_REGULATOR_* ids.  The first entries are PRCMU voltage domains
 * using db8500_regulator_ops; the DB8500_REGULATOR_SWITCH_* entries
 * are EPOD power-domain switches using db8500_regulator_switch_ops.
 */
static struct db8500_regulator_info
		db8500_regulator_info[DB8500_NUM_REGULATORS] = {
	[DB8500_REGULATOR_VAPE] = {
		.desc = {
			.name	= "db8500-vape",
			.id	= DB8500_REGULATOR_VAPE,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
	},
	[DB8500_REGULATOR_VARM] = {
		.desc = {
			.name	= "db8500-varm",
			.id	= DB8500_REGULATOR_VARM,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
	},
	[DB8500_REGULATOR_VMODEM] = {
		.desc = {
			.name	= "db8500-vmodem",
			.id	= DB8500_REGULATOR_VMODEM,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
	},
	[DB8500_REGULATOR_VPLL] = {
		.desc = {
			.name	= "db8500-vpll",
			.id	= DB8500_REGULATOR_VPLL,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
	},
	[DB8500_REGULATOR_VSMPS1] = {
		.desc = {
			.name	= "db8500-vsmps1",
			.id	= DB8500_REGULATOR_VSMPS1,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
	},
	[DB8500_REGULATOR_VSMPS2] = {
		.desc = {
			.name	= "db8500-vsmps2",
			.id	= DB8500_REGULATOR_VSMPS2,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		/* enabling vsmps2 does not require the "active" state */
		.exclude_from_power_state = true,
	},
	[DB8500_REGULATOR_VSMPS3] = {
		.desc = {
			.name	= "db8500-vsmps3",
			.id	= DB8500_REGULATOR_VSMPS3,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
	},
	[DB8500_REGULATOR_VRF1] = {
		.desc = {
			.name	= "db8500-vrf1",
			.id	= DB8500_REGULATOR_VRF1,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
	},
	[DB8500_REGULATOR_SWITCH_SVAMMDSP] = {
		.desc = {
			.name	= "db8500-sva-mmdsp",
			.id	= DB8500_REGULATOR_SWITCH_SVAMMDSP,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_SVAMMDSP,
	},
	[DB8500_REGULATOR_SWITCH_SVAMMDSPRET] = {
		.desc = {
			.name	= "db8500-sva-mmdsp-ret",
			.id	= DB8500_REGULATOR_SWITCH_SVAMMDSPRET,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_SVAMMDSP,
		.is_ramret = true,
	},
	[DB8500_REGULATOR_SWITCH_SVAPIPE] = {
		.desc = {
			.name	= "db8500-sva-pipe",
			.id	= DB8500_REGULATOR_SWITCH_SVAPIPE,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_SVAPIPE,
	},
	[DB8500_REGULATOR_SWITCH_SIAMMDSP] = {
		.desc = {
			.name	= "db8500-sia-mmdsp",
			.id	= DB8500_REGULATOR_SWITCH_SIAMMDSP,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_SIAMMDSP,
	},
	[DB8500_REGULATOR_SWITCH_SIAMMDSPRET] = {
		.desc = {
			.name	= "db8500-sia-mmdsp-ret",
			.id	= DB8500_REGULATOR_SWITCH_SIAMMDSPRET,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_SIAMMDSP,
		.is_ramret = true,
	},
	[DB8500_REGULATOR_SWITCH_SIAPIPE] = {
		.desc = {
			.name	= "db8500-sia-pipe",
			.id	= DB8500_REGULATOR_SWITCH_SIAPIPE,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_SIAPIPE,
	},
	[DB8500_REGULATOR_SWITCH_SGA] = {
		.desc = {
			.name	= "db8500-sga",
			.id	= DB8500_REGULATOR_SWITCH_SGA,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_SGA,
	},
	[DB8500_REGULATOR_SWITCH_B2R2_MCDE] = {
		.desc = {
			.name	= "db8500-b2r2-mcde",
			.id	= DB8500_REGULATOR_SWITCH_B2R2_MCDE,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_B2R2_MCDE,
	},
	[DB8500_REGULATOR_SWITCH_ESRAM12] = {
		.desc = {
			.name	= "db8500-esram12",
			.id	= DB8500_REGULATOR_SWITCH_ESRAM12,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id	= EPOD_ID_ESRAM12,
		/* NOTE(review): marked enabled at boot — confirm this
		 * matches the PRCMU reset defaults. */
		.is_enabled	= true,
	},
	[DB8500_REGULATOR_SWITCH_ESRAM12RET] = {
		.desc = {
			.name	= "db8500-esram12-ret",
			.id	= DB8500_REGULATOR_SWITCH_ESRAM12RET,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_ESRAM12,
		.is_ramret = true,
	},
	[DB8500_REGULATOR_SWITCH_ESRAM34] = {
		.desc = {
			.name	= "db8500-esram34",
			.id	= DB8500_REGULATOR_SWITCH_ESRAM34,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id	= EPOD_ID_ESRAM34,
		/* NOTE(review): marked enabled at boot — confirm this
		 * matches the PRCMU reset defaults. */
		.is_enabled	= true,
	},
	[DB8500_REGULATOR_SWITCH_ESRAM34RET] = {
		.desc = {
			.name	= "db8500-esram34-ret",
			.id	= DB8500_REGULATOR_SWITCH_ESRAM34RET,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_ESRAM34,
		.is_ramret = true,
	},
};
471
472static int __devinit db8500_regulator_probe(struct platform_device *pdev)
473{
474 struct regulator_init_data *db8500_init_data = mfd_get_data(pdev);
475 int i, err;
476
477 /* register all regulators */
478 for (i = 0; i < ARRAY_SIZE(db8500_regulator_info); i++) {
479 struct db8500_regulator_info *info;
480 struct regulator_init_data *init_data = &db8500_init_data[i];
481
482 /* assign per-regulator data */
483 info = &db8500_regulator_info[i];
484 info->dev = &pdev->dev;
485
486 /* register with the regulator framework */
487 info->rdev = regulator_register(&info->desc, &pdev->dev,
488 init_data, info);
489 if (IS_ERR(info->rdev)) {
490 err = PTR_ERR(info->rdev);
491 dev_err(&pdev->dev, "failed to register %s: err %i\n",
492 info->desc.name, err);
493
494 /* if failing, unregister all earlier regulators */
495 i--;
496 while (i >= 0) {
497 info = &db8500_regulator_info[i];
498 regulator_unregister(info->rdev);
499 i--;
500 }
501 return err;
502 }
503
504 dev_dbg(rdev_get_dev(info->rdev),
505 "regulator-%s-probed\n", info->desc.name);
506 }
507
508 return 0;
509}
510
511static int __exit db8500_regulator_remove(struct platform_device *pdev)
512{
513 int i;
514
515 for (i = 0; i < ARRAY_SIZE(db8500_regulator_info); i++) {
516 struct db8500_regulator_info *info;
517 info = &db8500_regulator_info[i];
518
519 dev_vdbg(rdev_get_dev(info->rdev),
520 "regulator-%s-remove\n", info->desc.name);
521
522 regulator_unregister(info->rdev);
523 }
524
525 return 0;
526}
527
/* Bound by name to the "db8500-prcmu-regulators" platform device. */
static struct platform_driver db8500_regulator_driver = {
	.driver = {
		.name = "db8500-prcmu-regulators",
		.owner = THIS_MODULE,
	},
	.probe = db8500_regulator_probe,
	.remove = __exit_p(db8500_regulator_remove),
};
536
537static int __init db8500_regulator_init(void)
538{
539 int ret;
540
541 ret = platform_driver_register(&db8500_regulator_driver);
542 if (ret < 0)
543 return -ENODEV;
544
545 return 0;
546}
547
/* Module teardown: unregister the platform driver. */
static void __exit db8500_regulator_exit(void)
{
	platform_driver_unregister(&db8500_regulator_driver);
}
552
/* arch_initcall: register early so consumer drivers that probe later
 * can find these supplies. */
arch_initcall(db8500_regulator_init);
module_exit(db8500_regulator_exit);

MODULE_AUTHOR("STMicroelectronics/ST-Ericsson");
MODULE_DESCRIPTION("DB8500 regulator driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 42891726ea72..b8f4e9e66cd5 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -992,4 +992,11 @@ config RTC_DRV_TEGRA
992 This drive can also be built as a module. If so, the module 992 This drive can also be built as a module. If so, the module
993 will be called rtc-tegra. 993 will be called rtc-tegra.
994 994
995config RTC_DRV_TILE
996 tristate "Tilera hypervisor RTC support"
997 depends on TILE
998 help
999 Enable support for the Linux driver side of the Tilera
1000 hypervisor's real-time clock interface.
1001
995endif # RTC_CLASS 1002endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index ca91c3c42e98..9574748d1c73 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -93,6 +93,7 @@ obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o
93obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o 93obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o
94obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o 94obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o
95obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o 95obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
96obj-$(CONFIG_RTC_DRV_TILE) += rtc-tile.o
96obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o 97obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
97obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o 98obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o
98obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o 99obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
diff --git a/drivers/rtc/rtc-tile.c b/drivers/rtc/rtc-tile.c
new file mode 100644
index 000000000000..eb65dafee66e
--- /dev/null
+++ b/drivers/rtc/rtc-tile.c
@@ -0,0 +1,162 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * Tilera-specific RTC driver.
15 */
16
17#include <linux/module.h>
18#include <linux/device.h>
19#include <linux/rtc.h>
20#include <linux/platform_device.h>
21
/* Platform device pointer; created in tile_rtc_driver_init(). */
static struct platform_device *tile_rtc_platform_device;
24
25/*
26 * RTC read routine. Gets time info from RTC chip via hypervisor syscall.
27 */
28static int read_rtc_time(struct device *dev, struct rtc_time *tm)
29{
30 HV_RTCTime hvtm = hv_get_rtc();
31
32 tm->tm_sec = hvtm.tm_sec;
33 tm->tm_min = hvtm.tm_min;
34 tm->tm_hour = hvtm.tm_hour;
35 tm->tm_mday = hvtm.tm_mday;
36 tm->tm_mon = hvtm.tm_mon;
37 tm->tm_year = hvtm.tm_year;
38 tm->tm_wday = 0;
39 tm->tm_yday = 0;
40 tm->tm_isdst = 0;
41
42 if (rtc_valid_tm(tm) < 0)
43 dev_warn(dev, "Read invalid date/time from RTC\n");
44
45 return 0;
46}
47
48/*
49 * RTC write routine. Sends time info to hypervisor via syscall, to be
50 * written to RTC chip.
51 */
52static int set_rtc_time(struct device *dev, struct rtc_time *tm)
53{
54 HV_RTCTime hvtm;
55
56 hvtm.tm_sec = tm->tm_sec;
57 hvtm.tm_min = tm->tm_min;
58 hvtm.tm_hour = tm->tm_hour;
59 hvtm.tm_mday = tm->tm_mday;
60 hvtm.tm_mon = tm->tm_mon;
61 hvtm.tm_year = tm->tm_year;
62
63 hv_set_rtc(hvtm);
64
65 return 0;
66}
67
/*
 * RTC read/write ops registered with the RTC class core.
 */
static const struct rtc_class_ops tile_rtc_ops = {
	.read_time = read_rtc_time,
	.set_time = set_rtc_time,
};
75
76/*
77 * Device probe routine.
78 */
79static int __devinit tile_rtc_probe(struct platform_device *dev)
80{
81 struct rtc_device *rtc;
82
83 rtc = rtc_device_register("tile",
84 &dev->dev, &tile_rtc_ops, THIS_MODULE);
85
86 if (IS_ERR(rtc))
87 return PTR_ERR(rtc);
88
89 platform_set_drvdata(dev, rtc);
90
91 return 0;
92}
93
94/*
95 * Device cleanup routine.
96 */
97static int __devexit tile_rtc_remove(struct platform_device *dev)
98{
99 struct rtc_device *rtc = platform_get_drvdata(dev);
100
101 if (rtc)
102 rtc_device_unregister(rtc);
103
104 platform_set_drvdata(dev, NULL);
105
106 return 0;
107}
108
/* Bound by name to the "rtc-tile" device created in init below. */
static struct platform_driver tile_rtc_platform_driver = {
	.driver = {
		.name = "rtc-tile",
		.owner = THIS_MODULE,
	},
	.probe = tile_rtc_probe,
	.remove = __devexit_p(tile_rtc_remove),
};
117
/*
 * Driver init routine.
 *
 * Registers the platform driver, then allocates and adds the platform
 * device that binds to it.  Each failure path unwinds exactly the
 * steps that already succeeded.
 */
static int __init tile_rtc_driver_init(void)
{
	int err;

	err = platform_driver_register(&tile_rtc_platform_driver);
	if (err)
		return err;

	tile_rtc_platform_device = platform_device_alloc("rtc-tile", 0);
	if (tile_rtc_platform_device == NULL) {
		err = -ENOMEM;
		goto exit_driver_unregister;
	}

	err = platform_device_add(tile_rtc_platform_device);
	if (err)
		goto exit_device_put;

	return 0;

exit_device_put:
	/* Drop the reference taken by platform_device_alloc(). */
	platform_device_put(tile_rtc_platform_device);

exit_driver_unregister:
	platform_driver_unregister(&tile_rtc_platform_driver);
	return err;
}
148
149/*
150 * Driver cleanup routine.
151 */
152static void __exit tile_rtc_driver_exit(void)
153{
154 platform_driver_unregister(&tile_rtc_platform_driver);
155}
156
/* Standard module entry/exit and metadata. */
module_init(tile_rtc_driver_init);
module_exit(tile_rtc_driver_exit);

MODULE_DESCRIPTION("Tilera-specific Real Time Clock Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:rtc-tile");
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 35381cb0936e..03e522b2fe0b 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -655,6 +655,27 @@ static int qla4_8xxx_pci_is_same_window(struct scsi_qla_host *ha,
655 return 0; 655 return 0;
656} 656}
657 657
#ifndef readq
/*
 * 64-bit MMIO read fallback for architectures that do not provide
 * readq: two 32-bit reads, low word first.  NOTE(review): the two
 * halves are not read atomically — acceptable only if the device
 * tolerates split 64-bit accesses; confirm for this hardware.
 */
static inline __u64 readq(const volatile void __iomem *addr)
{
	const volatile u32 __iomem *p = addr;
	u32 low, high;

	low = readl(p);
	high = readl(p + 1);

	return low + ((u64)high << 32);
}
#endif
670
#ifndef writeq
/*
 * 64-bit MMIO write fallback: low 32 bits first, then the high 32
 * bits at addr+4.  NOTE(review): not atomic — the device must accept
 * split 64-bit writes; confirm for this hardware.
 */
static inline void writeq(__u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val >> 32, addr+4);
}
#endif
678
658static int qla4_8xxx_pci_mem_read_direct(struct scsi_qla_host *ha, 679static int qla4_8xxx_pci_mem_read_direct(struct scsi_qla_host *ha,
659 u64 off, void *data, int size) 680 u64 off, void *data, int size)
660{ 681{
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 95019c747cc1..4778e2707168 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -636,7 +636,7 @@ static int sr_probe(struct device *dev)
636 disk->first_minor = minor; 636 disk->first_minor = minor;
637 sprintf(disk->disk_name, "sr%d", minor); 637 sprintf(disk->disk_name, "sr%d", minor);
638 disk->fops = &sr_bdops; 638 disk->fops = &sr_bdops;
639 disk->flags = GENHD_FL_CD; 639 disk->flags = GENHD_FL_CD | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
640 disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST; 640 disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;
641 641
642 blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT); 642 blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
diff --git a/drivers/staging/zcache/zcache.c b/drivers/staging/zcache/zcache.c
index b8a2b30a1572..77ac2d4d3ef1 100644
--- a/drivers/staging/zcache/zcache.c
+++ b/drivers/staging/zcache/zcache.c
@@ -1181,9 +1181,12 @@ static bool zcache_freeze;
1181/* 1181/*
1182 * zcache shrinker interface (only useful for ephemeral pages, so zbud only) 1182 * zcache shrinker interface (only useful for ephemeral pages, so zbud only)
1183 */ 1183 */
1184static int shrink_zcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) 1184static int shrink_zcache_memory(struct shrinker *shrink,
1185 struct shrink_control *sc)
1185{ 1186{
1186 int ret = -1; 1187 int ret = -1;
1188 int nr = sc->nr_to_scan;
1189 gfp_t gfp_mask = sc->gfp_mask;
1187 1190
1188 if (nr >= 0) { 1191 if (nr >= 0) {
1189 if (!(gfp_mask & __GFP_FS)) 1192 if (!(gfp_mask & __GFP_FS))
diff --git a/drivers/tty/serial/68328serial.c b/drivers/tty/serial/68328serial.c
index d5bfd41707e7..e0a77540b8ca 100644
--- a/drivers/tty/serial/68328serial.c
+++ b/drivers/tty/serial/68328serial.c
@@ -281,7 +281,7 @@ static void receive_chars(struct m68k_serial *info, unsigned short rx)
281#ifdef CONFIG_MAGIC_SYSRQ 281#ifdef CONFIG_MAGIC_SYSRQ
282 } else if (ch == 0x10) { /* ^P */ 282 } else if (ch == 0x10) { /* ^P */
283 show_state(); 283 show_state();
284 show_free_areas(); 284 show_free_areas(0);
285 show_buffers(); 285 show_buffers();
286/* show_net_buffers(); */ 286/* show_net_buffers(); */
287 return; 287 return;
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index c63d0d152af6..f2cb7503fcb2 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -15,6 +15,7 @@
15 *Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 15 *Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
16 */ 16 */
17#include <linux/serial_reg.h> 17#include <linux/serial_reg.h>
18#include <linux/slab.h>
18#include <linux/module.h> 19#include <linux/module.h>
19#include <linux/pci.h> 20#include <linux/pci.h>
20#include <linux/serial_core.h> 21#include <linux/serial_core.h>
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index f9916ca5ca4d..549b960667c8 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1460,6 +1460,14 @@ config FB_S3
1460 ---help--- 1460 ---help---
1461 Driver for graphics boards with S3 Trio / S3 Virge chip. 1461 Driver for graphics boards with S3 Trio / S3 Virge chip.
1462 1462
1463config FB_S3_DDC
1464 bool "DDC for S3 support"
1465 depends on FB_S3
1466 select FB_DDC
1467 default y
1468 help
1469 Say Y here if you want DDC support for your S3 graphics card.
1470
1463config FB_SAVAGE 1471config FB_SAVAGE
1464 tristate "S3 Savage support" 1472 tristate "S3 Savage support"
1465 depends on FB && PCI && EXPERIMENTAL 1473 depends on FB && PCI && EXPERIMENTAL
@@ -1983,6 +1991,18 @@ config FB_SH_MOBILE_HDMI
1983 ---help--- 1991 ---help---
1984 Driver for the on-chip SH-Mobile HDMI controller. 1992 Driver for the on-chip SH-Mobile HDMI controller.
1985 1993
1994config FB_SH_MOBILE_MERAM
1995 tristate "SuperH Mobile MERAM read ahead support for LCDC"
1996 depends on FB_SH_MOBILE_LCDC
1997 default y
1998 ---help---
1999 Enable MERAM support for the SH-Mobile LCD controller.
2000
2001 This will allow for caching of the framebuffer to provide more
2002 reliable access under heavy main memory bus traffic situations.
2003 Up to 4 memory channels can be configured, allowing 4 RGB or
2004 2 YCbCr framebuffers to be configured.
2005
1986config FB_TMIO 2006config FB_TMIO
1987 tristate "Toshiba Mobile IO FrameBuffer support" 2007 tristate "Toshiba Mobile IO FrameBuffer support"
1988 depends on FB && MFD_CORE 2008 depends on FB && MFD_CORE
@@ -2246,29 +2266,43 @@ config FB_METRONOME
2246config FB_MB862XX 2266config FB_MB862XX
2247 tristate "Fujitsu MB862xx GDC support" 2267 tristate "Fujitsu MB862xx GDC support"
2248 depends on FB 2268 depends on FB
2269 depends on PCI || (OF && PPC)
2249 select FB_CFB_FILLRECT 2270 select FB_CFB_FILLRECT
2250 select FB_CFB_COPYAREA 2271 select FB_CFB_COPYAREA
2251 select FB_CFB_IMAGEBLIT 2272 select FB_CFB_IMAGEBLIT
2252 ---help--- 2273 ---help---
2253 Frame buffer driver for Fujitsu Carmine/Coral-P(A)/Lime controllers. 2274 Frame buffer driver for Fujitsu Carmine/Coral-P(A)/Lime controllers.
2254 2275
2276choice
2277 prompt "GDC variant"
2278 depends on FB_MB862XX
2279
2255config FB_MB862XX_PCI_GDC 2280config FB_MB862XX_PCI_GDC
2256 bool "Carmine/Coral-P(A) GDC" 2281 bool "Carmine/Coral-P(A) GDC"
2257 depends on PCI && FB_MB862XX 2282 depends on PCI
2258 ---help--- 2283 ---help---
2259 This enables framebuffer support for Fujitsu Carmine/Coral-P(A) 2284 This enables framebuffer support for Fujitsu Carmine/Coral-P(A)
2260 PCI graphics controller devices. 2285 PCI graphics controller devices.
2261 2286
2262config FB_MB862XX_LIME 2287config FB_MB862XX_LIME
2263 bool "Lime GDC" 2288 bool "Lime GDC"
2264 depends on FB_MB862XX 2289 depends on OF && PPC
2265 depends on OF && !FB_MB862XX_PCI_GDC
2266 depends on PPC
2267 select FB_FOREIGN_ENDIAN 2290 select FB_FOREIGN_ENDIAN
2268 select FB_LITTLE_ENDIAN 2291 select FB_LITTLE_ENDIAN
2269 ---help--- 2292 ---help---
2270 Framebuffer support for Fujitsu Lime GDC on host CPU bus. 2293 Framebuffer support for Fujitsu Lime GDC on host CPU bus.
2271 2294
2295endchoice
2296
2297config FB_MB862XX_I2C
2298 bool "Support I2C bus on MB862XX GDC"
2299 depends on FB_MB862XX && I2C
2300 default y
2301 help
2302 Selecting this option adds Coral-P(A)/Lime GDC I2C bus adapter
2303 driver to support accessing I2C devices on controller's I2C bus.
2304 These are usually some video decoder chips.
2305
2272config FB_EP93XX 2306config FB_EP93XX
2273 tristate "EP93XX frame buffer support" 2307 tristate "EP93XX frame buffer support"
2274 depends on FB && ARCH_EP93XX 2308 depends on FB && ARCH_EP93XX
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 2ea44b6625fe..8b83129e209c 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -130,6 +130,7 @@ obj-$(CONFIG_FB_UDL) += udlfb.o
130obj-$(CONFIG_FB_XILINX) += xilinxfb.o 130obj-$(CONFIG_FB_XILINX) += xilinxfb.o
131obj-$(CONFIG_SH_MIPI_DSI) += sh_mipi_dsi.o 131obj-$(CONFIG_SH_MIPI_DSI) += sh_mipi_dsi.o
132obj-$(CONFIG_FB_SH_MOBILE_HDMI) += sh_mobile_hdmi.o 132obj-$(CONFIG_FB_SH_MOBILE_HDMI) += sh_mobile_hdmi.o
133obj-$(CONFIG_FB_SH_MOBILE_MERAM) += sh_mobile_meram.o
133obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o 134obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o
134obj-$(CONFIG_FB_OMAP) += omap/ 135obj-$(CONFIG_FB_OMAP) += omap/
135obj-y += omap2/ 136obj-y += omap2/
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index e5d6b56d4447..5ea6596dd824 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -2224,22 +2224,23 @@ static int amifb_ioctl(struct fb_info *info,
2224 * Allocate, Clear and Align a Block of Chip Memory 2224 * Allocate, Clear and Align a Block of Chip Memory
2225 */ 2225 */
2226 2226
2227static u_long unaligned_chipptr = 0; 2227static void *aligned_chipptr;
2228 2228
2229static inline u_long __init chipalloc(u_long size) 2229static inline u_long __init chipalloc(u_long size)
2230{ 2230{
2231 size += PAGE_SIZE-1; 2231 aligned_chipptr = amiga_chip_alloc(size, "amifb [RAM]");
2232 if (!(unaligned_chipptr = (u_long)amiga_chip_alloc(size, 2232 if (!aligned_chipptr) {
2233 "amifb [RAM]"))) 2233 pr_err("amifb: No Chip RAM for frame buffer");
2234 panic("No Chip RAM for frame buffer"); 2234 return 0;
2235 memset((void *)unaligned_chipptr, 0, size); 2235 }
2236 return PAGE_ALIGN(unaligned_chipptr); 2236 memset(aligned_chipptr, 0, size);
2237 return (u_long)aligned_chipptr;
2237} 2238}
2238 2239
2239static inline void chipfree(void) 2240static inline void chipfree(void)
2240{ 2241{
2241 if (unaligned_chipptr) 2242 if (aligned_chipptr)
2242 amiga_chip_free((void *)unaligned_chipptr); 2243 amiga_chip_free(aligned_chipptr);
2243} 2244}
2244 2245
2245 2246
@@ -2295,7 +2296,7 @@ default_chipset:
2295 defmode = amiga_vblank == 50 ? DEFMODE_PAL 2296 defmode = amiga_vblank == 50 ? DEFMODE_PAL
2296 : DEFMODE_NTSC; 2297 : DEFMODE_NTSC;
2297 if (amiga_chip_avail()-CHIPRAM_SAFETY_LIMIT > 2298 if (amiga_chip_avail()-CHIPRAM_SAFETY_LIMIT >
2298 VIDEOMEMSIZE_ECS_1M) 2299 VIDEOMEMSIZE_ECS_2M)
2299 fb_info.fix.smem_len = VIDEOMEMSIZE_ECS_2M; 2300 fb_info.fix.smem_len = VIDEOMEMSIZE_ECS_2M;
2300 else 2301 else
2301 fb_info.fix.smem_len = VIDEOMEMSIZE_ECS_1M; 2302 fb_info.fix.smem_len = VIDEOMEMSIZE_ECS_1M;
@@ -2312,7 +2313,7 @@ default_chipset:
2312 maxfmode = TAG_FMODE_4; 2313 maxfmode = TAG_FMODE_4;
2313 defmode = DEFMODE_AGA; 2314 defmode = DEFMODE_AGA;
2314 if (amiga_chip_avail()-CHIPRAM_SAFETY_LIMIT > 2315 if (amiga_chip_avail()-CHIPRAM_SAFETY_LIMIT >
2315 VIDEOMEMSIZE_AGA_1M) 2316 VIDEOMEMSIZE_AGA_2M)
2316 fb_info.fix.smem_len = VIDEOMEMSIZE_AGA_2M; 2317 fb_info.fix.smem_len = VIDEOMEMSIZE_AGA_2M;
2317 else 2318 else
2318 fb_info.fix.smem_len = VIDEOMEMSIZE_AGA_1M; 2319 fb_info.fix.smem_len = VIDEOMEMSIZE_AGA_1M;
@@ -2385,6 +2386,10 @@ default_chipset:
2385 DUMMYSPRITEMEMSIZE+ 2386 DUMMYSPRITEMEMSIZE+
2386 COPINITSIZE+ 2387 COPINITSIZE+
2387 4*COPLISTSIZE); 2388 4*COPLISTSIZE);
2389 if (!chipptr) {
2390 err = -ENOMEM;
2391 goto amifb_error;
2392 }
2388 2393
2389 assignchunk(videomemory, u_long, chipptr, fb_info.fix.smem_len); 2394 assignchunk(videomemory, u_long, chipptr, fb_info.fix.smem_len);
2390 assignchunk(spritememory, u_long, chipptr, SPRITEMEMSIZE); 2395 assignchunk(spritememory, u_long, chipptr, SPRITEMEMSIZE);
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index af3119707dbf..d1aee730d7d8 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -211,8 +211,12 @@ static ssize_t adp5520_bl_daylight_max_store(struct device *dev,
211 const char *buf, size_t count) 211 const char *buf, size_t count)
212{ 212{
213 struct adp5520_bl *data = dev_get_drvdata(dev); 213 struct adp5520_bl *data = dev_get_drvdata(dev);
214 int ret;
215
216 ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
217 if (ret < 0)
218 return ret;
214 219
215 strict_strtoul(buf, 10, &data->cached_daylight_max);
216 return adp5520_store(dev, buf, count, ADP5520_DAYLIGHT_MAX); 220 return adp5520_store(dev, buf, count, ADP5520_DAYLIGHT_MAX);
217} 221}
218static DEVICE_ATTR(daylight_max, 0664, adp5520_bl_daylight_max_show, 222static DEVICE_ATTR(daylight_max, 0664, adp5520_bl_daylight_max_show,
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 8b7d47386f39..fcdac872522d 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -899,7 +899,7 @@ static struct fb_ops da8xx_fb_ops = {
899 .fb_blank = cfb_blank, 899 .fb_blank = cfb_blank,
900}; 900};
901 901
902static int __init fb_probe(struct platform_device *device) 902static int __devinit fb_probe(struct platform_device *device)
903{ 903{
904 struct da8xx_lcdc_platform_data *fb_pdata = 904 struct da8xx_lcdc_platform_data *fb_pdata =
905 device->dev.platform_data; 905 device->dev.platform_data;
@@ -1165,7 +1165,7 @@ static int fb_resume(struct platform_device *dev)
1165 1165
1166static struct platform_driver da8xx_fb_driver = { 1166static struct platform_driver da8xx_fb_driver = {
1167 .probe = fb_probe, 1167 .probe = fb_probe,
1168 .remove = fb_remove, 1168 .remove = __devexit_p(fb_remove),
1169 .suspend = fb_suspend, 1169 .suspend = fb_suspend,
1170 .resume = fb_resume, 1170 .resume = fb_resume,
1171 .driver = { 1171 .driver = {
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 4eb38db36e4b..fb205843c2c7 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -242,9 +242,9 @@ static int set_system(const struct dmi_system_id *id)
242 return 0; 242 return 0;
243 } 243 }
244 244
245 printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p " 245 printk(KERN_INFO "efifb: dmi detected %s - framebuffer at 0x%08x "
246 "(%dx%d, stride %d)\n", id->ident, 246 "(%dx%d, stride %d)\n", id->ident,
247 (void *)screen_info.lfb_base, screen_info.lfb_width, 247 screen_info.lfb_base, screen_info.lfb_width,
248 screen_info.lfb_height, screen_info.lfb_linelength); 248 screen_info.lfb_height, screen_info.lfb_linelength);
249 249
250 250
diff --git a/drivers/video/mb862xx/Makefile b/drivers/video/mb862xx/Makefile
index d7777714166b..5707ed0e31a7 100644
--- a/drivers/video/mb862xx/Makefile
+++ b/drivers/video/mb862xx/Makefile
@@ -2,4 +2,7 @@
2# Makefile for the MB862xx framebuffer driver 2# Makefile for the MB862xx framebuffer driver
3# 3#
4 4
5obj-$(CONFIG_FB_MB862XX) := mb862xxfb.o mb862xxfb_accel.o 5obj-$(CONFIG_FB_MB862XX) += mb862xxfb.o
6
7mb862xxfb-y := mb862xxfbdrv.o mb862xxfb_accel.o
8mb862xxfb-$(CONFIG_FB_MB862XX_I2C) += mb862xx-i2c.o
diff --git a/drivers/video/mb862xx/mb862xx-i2c.c b/drivers/video/mb862xx/mb862xx-i2c.c
new file mode 100644
index 000000000000..cb77d3b4657d
--- /dev/null
+++ b/drivers/video/mb862xx/mb862xx-i2c.c
@@ -0,0 +1,177 @@
1/*
2 * Coral-P(A)/Lime I2C adapter driver
3 *
4 * (C) 2011 DENX Software Engineering, Anatolij Gustschin <agust@denx.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/fb.h>
13#include <linux/i2c.h>
14#include <linux/io.h>
15
16#include "mb862xxfb.h"
17#include "mb862xx_reg.h"
18
19static int mb862xx_i2c_wait_event(struct i2c_adapter *adap)
20{
21 struct mb862xxfb_par *par = adap->algo_data;
22 u32 reg;
23
24 do {
25 udelay(1);
26 reg = inreg(i2c, GC_I2C_BCR);
27 if (reg & (I2C_INT | I2C_BER))
28 break;
29 } while (1);
30
31 return (reg & I2C_BER) ? 0 : 1;
32}
33
34static int mb862xx_i2c_do_address(struct i2c_adapter *adap, int addr)
35{
36 struct mb862xxfb_par *par = adap->algo_data;
37
38 outreg(i2c, GC_I2C_DAR, addr);
39 outreg(i2c, GC_I2C_CCR, I2C_CLOCK_AND_ENABLE);
40 outreg(i2c, GC_I2C_BCR, par->i2c_rs ? I2C_REPEATED_START : I2C_START);
41 if (!mb862xx_i2c_wait_event(adap))
42 return -EIO;
43 par->i2c_rs = !(inreg(i2c, GC_I2C_BSR) & I2C_LRB);
44 return par->i2c_rs;
45}
46
47static int mb862xx_i2c_write_byte(struct i2c_adapter *adap, u8 byte)
48{
49 struct mb862xxfb_par *par = adap->algo_data;
50
51 outreg(i2c, GC_I2C_DAR, byte);
52 outreg(i2c, GC_I2C_BCR, I2C_START);
53 if (!mb862xx_i2c_wait_event(adap))
54 return -EIO;
55 return !(inreg(i2c, GC_I2C_BSR) & I2C_LRB);
56}
57
58static int mb862xx_i2c_read_byte(struct i2c_adapter *adap, u8 *byte, int last)
59{
60 struct mb862xxfb_par *par = adap->algo_data;
61
62 outreg(i2c, GC_I2C_BCR, I2C_START | (last ? 0 : I2C_ACK));
63 if (!mb862xx_i2c_wait_event(adap))
64 return 0;
65 *byte = inreg(i2c, GC_I2C_DAR);
66 return 1;
67}
68
69void mb862xx_i2c_stop(struct i2c_adapter *adap)
70{
71 struct mb862xxfb_par *par = adap->algo_data;
72
73 outreg(i2c, GC_I2C_BCR, I2C_STOP);
74 outreg(i2c, GC_I2C_CCR, I2C_DISABLE);
75 par->i2c_rs = 0;
76}
77
78static int mb862xx_i2c_read(struct i2c_adapter *adap, struct i2c_msg *m)
79{
80 int i, ret = 0;
81 int last = m->len - 1;
82
83 for (i = 0; i < m->len; i++) {
84 if (!mb862xx_i2c_read_byte(adap, &m->buf[i], i == last)) {
85 ret = -EIO;
86 break;
87 }
88 }
89 return ret;
90}
91
92static int mb862xx_i2c_write(struct i2c_adapter *adap, struct i2c_msg *m)
93{
94 int i, ret = 0;
95
96 for (i = 0; i < m->len; i++) {
97 if (!mb862xx_i2c_write_byte(adap, m->buf[i])) {
98 ret = -EIO;
99 break;
100 }
101 }
102 return ret;
103}
104
105static int mb862xx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
106 int num)
107{
108 struct mb862xxfb_par *par = adap->algo_data;
109 struct i2c_msg *m;
110 int addr;
111 int i = 0, err = 0;
112
113 dev_dbg(par->dev, "%s: %d msgs\n", __func__, num);
114
115 for (i = 0; i < num; i++) {
116 m = &msgs[i];
117 if (!m->len) {
118 dev_dbg(par->dev, "%s: null msgs\n", __func__);
119 continue;
120 }
121 addr = m->addr;
122 if (m->flags & I2C_M_RD)
123 addr |= 1;
124
125 err = mb862xx_i2c_do_address(adap, addr);
126 if (err < 0)
127 break;
128 if (m->flags & I2C_M_RD)
129 err = mb862xx_i2c_read(adap, m);
130 else
131 err = mb862xx_i2c_write(adap, m);
132 }
133
134 if (i)
135 mb862xx_i2c_stop(adap);
136
137 return (err < 0) ? err : i;
138}
139
140static u32 mb862xx_func(struct i2c_adapter *adap)
141{
142 return I2C_FUNC_SMBUS_BYTE_DATA;
143}
144
145static const struct i2c_algorithm mb862xx_algo = {
146 .master_xfer = mb862xx_xfer,
147 .functionality = mb862xx_func,
148};
149
150static struct i2c_adapter mb862xx_i2c_adapter = {
151 .name = "MB862xx I2C adapter",
152 .algo = &mb862xx_algo,
153 .owner = THIS_MODULE,
154};
155
156int mb862xx_i2c_init(struct mb862xxfb_par *par)
157{
158 int ret;
159
160 mb862xx_i2c_adapter.algo_data = par;
161 par->adap = &mb862xx_i2c_adapter;
162
163 ret = i2c_add_adapter(par->adap);
164 if (ret < 0) {
165 dev_err(par->dev, "failed to add %s\n",
166 mb862xx_i2c_adapter.name);
167 }
168 return ret;
169}
170
171void mb862xx_i2c_exit(struct mb862xxfb_par *par)
172{
173 if (par->adap) {
174 i2c_del_adapter(par->adap);
175 par->adap = NULL;
176 }
177}
diff --git a/drivers/video/mb862xx/mb862xx_reg.h b/drivers/video/mb862xx/mb862xx_reg.h
index 2ba65e118500..9df48b8edc94 100644
--- a/drivers/video/mb862xx/mb862xx_reg.h
+++ b/drivers/video/mb862xx/mb862xx_reg.h
@@ -5,11 +5,8 @@
5#ifndef _MB862XX_REG_H 5#ifndef _MB862XX_REG_H
6#define _MB862XX_REG_H 6#define _MB862XX_REG_H
7 7
8#ifdef MB862XX_MMIO_BOTTOM
9#define MB862XX_MMIO_BASE 0x03fc0000
10#else
11#define MB862XX_MMIO_BASE 0x01fc0000 8#define MB862XX_MMIO_BASE 0x01fc0000
12#endif 9#define MB862XX_MMIO_HIGH_BASE 0x03fc0000
13#define MB862XX_I2C_BASE 0x0000c000 10#define MB862XX_I2C_BASE 0x0000c000
14#define MB862XX_DISP_BASE 0x00010000 11#define MB862XX_DISP_BASE 0x00010000
15#define MB862XX_CAP_BASE 0x00018000 12#define MB862XX_CAP_BASE 0x00018000
@@ -23,6 +20,7 @@
23#define GC_IMASK 0x00000024 20#define GC_IMASK 0x00000024
24#define GC_SRST 0x0000002c 21#define GC_SRST 0x0000002c
25#define GC_CCF 0x00000038 22#define GC_CCF 0x00000038
23#define GC_RSW 0x0000005c
26#define GC_CID 0x000000f0 24#define GC_CID 0x000000f0
27#define GC_REVISION 0x00000084 25#define GC_REVISION 0x00000084
28 26
@@ -53,10 +51,16 @@
53#define GC_L0OA0 0x00000024 51#define GC_L0OA0 0x00000024
54#define GC_L0DA0 0x00000028 52#define GC_L0DA0 0x00000028
55#define GC_L0DY_L0DX 0x0000002c 53#define GC_L0DY_L0DX 0x0000002c
54#define GC_L1M 0x00000030
55#define GC_L1DA 0x00000034
56#define GC_DCM1 0x00000100 56#define GC_DCM1 0x00000100
57#define GC_L0EM 0x00000110 57#define GC_L0EM 0x00000110
58#define GC_L0WY_L0WX 0x00000114 58#define GC_L0WY_L0WX 0x00000114
59#define GC_L0WH_L0WW 0x00000118 59#define GC_L0WH_L0WW 0x00000118
60#define GC_L1EM 0x00000120
61#define GC_L1WY_L1WX 0x00000124
62#define GC_L1WH_L1WW 0x00000128
63#define GC_DLS 0x00000180
60#define GC_DCM2 0x00000104 64#define GC_DCM2 0x00000104
61#define GC_DCM3 0x00000108 65#define GC_DCM3 0x00000108
62#define GC_CPM_CUTC 0x000000a0 66#define GC_CPM_CUTC 0x000000a0
@@ -68,6 +72,11 @@
68 72
69#define GC_CPM_CEN0 0x00100000 73#define GC_CPM_CEN0 0x00100000
70#define GC_CPM_CEN1 0x00200000 74#define GC_CPM_CEN1 0x00200000
75#define GC_DCM1_DEN 0x80000000
76#define GC_DCM1_L1E 0x00020000
77#define GC_L1M_16 0x80000000
78#define GC_L1M_YC 0x40000000
79#define GC_L1M_CS 0x20000000
71 80
72#define GC_DCM01_ESY 0x00000004 81#define GC_DCM01_ESY 0x00000004
73#define GC_DCM01_SC 0x00003f00 82#define GC_DCM01_SC 0x00003f00
@@ -79,9 +88,50 @@
79#define GC_L0M_L0C_16 0x80000000 88#define GC_L0M_L0C_16 0x80000000
80#define GC_L0EM_L0EC_24 0x40000000 89#define GC_L0EM_L0EC_24 0x40000000
81#define GC_L0M_L0W_UNIT 64 90#define GC_L0M_L0W_UNIT 64
91#define GC_L1EM_DM 0x02000000
82 92
83#define GC_DISP_REFCLK_400 400 93#define GC_DISP_REFCLK_400 400
84 94
95/* I2C */
96#define GC_I2C_BSR 0x00000000 /* BSR */
97#define GC_I2C_BCR 0x00000004 /* BCR */
98#define GC_I2C_CCR 0x00000008 /* CCR */
99#define GC_I2C_ADR 0x0000000C /* ADR */
100#define GC_I2C_DAR 0x00000010 /* DAR */
101
102#define I2C_DISABLE 0x00000000
103#define I2C_STOP 0x00000000
104#define I2C_START 0x00000010
105#define I2C_REPEATED_START 0x00000030
106#define I2C_CLOCK_AND_ENABLE 0x0000003f
107#define I2C_READY 0x01
108#define I2C_INT 0x01
109#define I2C_INTE 0x02
110#define I2C_ACK 0x08
111#define I2C_BER 0x80
112#define I2C_BEIE 0x40
113#define I2C_TRX 0x80
114#define I2C_LRB 0x10
115
116/* Capture registers and bits */
117#define GC_CAP_VCM 0x00000000
118#define GC_CAP_CSC 0x00000004
119#define GC_CAP_VCS 0x00000008
120#define GC_CAP_CBM 0x00000010
121#define GC_CAP_CBOA 0x00000014
122#define GC_CAP_CBLA 0x00000018
123#define GC_CAP_IMG_START 0x0000001C
124#define GC_CAP_IMG_END 0x00000020
125#define GC_CAP_CMSS 0x00000048
126#define GC_CAP_CMDS 0x0000004C
127
128#define GC_VCM_VIE 0x80000000
129#define GC_VCM_CM 0x03000000
130#define GC_VCM_VS_PAL 0x00000002
131#define GC_CBM_OO 0x80000000
132#define GC_CBM_HRV 0x00000010
133#define GC_CBM_CBST 0x00000001
134
85/* Carmine specific */ 135/* Carmine specific */
86#define MB86297_DRAW_BASE 0x00020000 136#define MB86297_DRAW_BASE 0x00020000
87#define MB86297_DISP0_BASE 0x00100000 137#define MB86297_DISP0_BASE 0x00100000
diff --git a/drivers/video/mb862xx/mb862xxfb.h b/drivers/video/mb862xx/mb862xxfb.h
index d7e7cb76bbf2..8550630c1e01 100644
--- a/drivers/video/mb862xx/mb862xxfb.h
+++ b/drivers/video/mb862xx/mb862xxfb.h
@@ -1,6 +1,26 @@
1#ifndef __MB862XX_H__ 1#ifndef __MB862XX_H__
2#define __MB862XX_H__ 2#define __MB862XX_H__
3 3
4struct mb862xx_l1_cfg {
5 unsigned short sx;
6 unsigned short sy;
7 unsigned short sw;
8 unsigned short sh;
9 unsigned short dx;
10 unsigned short dy;
11 unsigned short dw;
12 unsigned short dh;
13 int mirror;
14};
15
16#define MB862XX_BASE 'M'
17#define MB862XX_L1_GET_CFG _IOR(MB862XX_BASE, 0, struct mb862xx_l1_cfg*)
18#define MB862XX_L1_SET_CFG _IOW(MB862XX_BASE, 1, struct mb862xx_l1_cfg*)
19#define MB862XX_L1_ENABLE _IOW(MB862XX_BASE, 2, int)
20#define MB862XX_L1_CAP_CTL _IOW(MB862XX_BASE, 3, int)
21
22#ifdef __KERNEL__
23
4#define PCI_VENDOR_ID_FUJITSU_LIMITED 0x10cf 24#define PCI_VENDOR_ID_FUJITSU_LIMITED 0x10cf
5#define PCI_DEVICE_ID_FUJITSU_CORALP 0x2019 25#define PCI_DEVICE_ID_FUJITSU_CORALP 0x2019
6#define PCI_DEVICE_ID_FUJITSU_CORALPA 0x201e 26#define PCI_DEVICE_ID_FUJITSU_CORALPA 0x201e
@@ -38,6 +58,8 @@ struct mb862xxfb_par {
38 void __iomem *mmio_base; /* remapped registers */ 58 void __iomem *mmio_base; /* remapped registers */
39 size_t mapped_vram; /* length of remapped vram */ 59 size_t mapped_vram; /* length of remapped vram */
40 size_t mmio_len; /* length of register region */ 60 size_t mmio_len; /* length of register region */
61 unsigned long cap_buf; /* capture buffers offset */
62 size_t cap_len; /* length of capture buffers */
41 63
42 void __iomem *host; /* relocatable reg. bases */ 64 void __iomem *host; /* relocatable reg. bases */
43 void __iomem *i2c; 65 void __iomem *i2c;
@@ -57,11 +79,23 @@ struct mb862xxfb_par {
57 unsigned int refclk; /* disp. reference clock */ 79 unsigned int refclk; /* disp. reference clock */
58 struct mb862xx_gc_mode *gc_mode; /* GDC mode init data */ 80 struct mb862xx_gc_mode *gc_mode; /* GDC mode init data */
59 int pre_init; /* don't init display if 1 */ 81 int pre_init; /* don't init display if 1 */
82 struct i2c_adapter *adap; /* GDC I2C bus adapter */
83 int i2c_rs;
84
85 struct mb862xx_l1_cfg l1_cfg;
86 int l1_stride;
60 87
61 u32 pseudo_palette[16]; 88 u32 pseudo_palette[16];
62}; 89};
63 90
64extern void mb862xxfb_init_accel(struct fb_info *info, int xres); 91extern void mb862xxfb_init_accel(struct fb_info *info, int xres);
92#ifdef CONFIG_FB_MB862XX_I2C
93extern int mb862xx_i2c_init(struct mb862xxfb_par *par);
94extern void mb862xx_i2c_exit(struct mb862xxfb_par *par);
95#else
96static inline int mb862xx_i2c_init(struct mb862xxfb_par *par) { return 0; }
97static inline void mb862xx_i2c_exit(struct mb862xxfb_par *par) { }
98#endif
65 99
66#if defined(CONFIG_FB_MB862XX_LIME) && defined(CONFIG_FB_MB862XX_PCI_GDC) 100#if defined(CONFIG_FB_MB862XX_LIME) && defined(CONFIG_FB_MB862XX_PCI_GDC)
67#error "Select Lime GDC or CoralP/Carmine support, but not both together" 101#error "Select Lime GDC or CoralP/Carmine support, but not both together"
@@ -82,4 +116,6 @@ extern void mb862xxfb_init_accel(struct fb_info *info, int xres);
82 116
83#define pack(a, b) (((a) << 16) | (b)) 117#define pack(a, b) (((a) << 16) | (b))
84 118
119#endif /* __KERNEL__ */
120
85#endif 121#endif
diff --git a/drivers/video/mb862xx/mb862xxfb.c b/drivers/video/mb862xx/mb862xxfbdrv.c
index c76e663a6cd4..ea39336addfb 100644
--- a/drivers/video/mb862xx/mb862xxfb.c
+++ b/drivers/video/mb862xx/mb862xxfbdrv.c
@@ -27,7 +27,7 @@
27 27
28#define NR_PALETTE 256 28#define NR_PALETTE 256
29#define MB862XX_MEM_SIZE 0x1000000 29#define MB862XX_MEM_SIZE 0x1000000
30#define CORALP_MEM_SIZE 0x4000000 30#define CORALP_MEM_SIZE 0x2000000
31#define CARMINE_MEM_SIZE 0x8000000 31#define CARMINE_MEM_SIZE 0x8000000
32#define DRV_NAME "mb862xxfb" 32#define DRV_NAME "mb862xxfb"
33 33
@@ -309,6 +309,97 @@ static int mb862xxfb_blank(int mode, struct fb_info *fbi)
309 return 0; 309 return 0;
310} 310}
311 311
312static int mb862xxfb_ioctl(struct fb_info *fbi, unsigned int cmd,
313 unsigned long arg)
314{
315 struct mb862xxfb_par *par = fbi->par;
316 struct mb862xx_l1_cfg *l1_cfg = &par->l1_cfg;
317 void __user *argp = (void __user *)arg;
318 int *enable;
319 u32 l1em = 0;
320
321 switch (cmd) {
322 case MB862XX_L1_GET_CFG:
323 if (copy_to_user(argp, l1_cfg, sizeof(*l1_cfg)))
324 return -EFAULT;
325 break;
326 case MB862XX_L1_SET_CFG:
327 if (copy_from_user(l1_cfg, argp, sizeof(*l1_cfg)))
328 return -EFAULT;
329 if ((l1_cfg->sw >= l1_cfg->dw) && (l1_cfg->sh >= l1_cfg->dh)) {
330 /* downscaling */
331 outreg(cap, GC_CAP_CSC,
332 pack((l1_cfg->sh << 11) / l1_cfg->dh,
333 (l1_cfg->sw << 11) / l1_cfg->dw));
334 l1em = inreg(disp, GC_L1EM);
335 l1em &= ~GC_L1EM_DM;
336 } else if ((l1_cfg->sw <= l1_cfg->dw) &&
337 (l1_cfg->sh <= l1_cfg->dh)) {
338 /* upscaling */
339 outreg(cap, GC_CAP_CSC,
340 pack((l1_cfg->sh << 11) / l1_cfg->dh,
341 (l1_cfg->sw << 11) / l1_cfg->dw));
342 outreg(cap, GC_CAP_CMSS,
343 pack(l1_cfg->sw >> 1, l1_cfg->sh));
344 outreg(cap, GC_CAP_CMDS,
345 pack(l1_cfg->dw >> 1, l1_cfg->dh));
346 l1em = inreg(disp, GC_L1EM);
347 l1em |= GC_L1EM_DM;
348 }
349
350 if (l1_cfg->mirror) {
351 outreg(cap, GC_CAP_CBM,
352 inreg(cap, GC_CAP_CBM) | GC_CBM_HRV);
353 l1em |= l1_cfg->dw * 2 - 8;
354 } else {
355 outreg(cap, GC_CAP_CBM,
356 inreg(cap, GC_CAP_CBM) & ~GC_CBM_HRV);
357 l1em &= 0xffff0000;
358 }
359 outreg(disp, GC_L1EM, l1em);
360 break;
361 case MB862XX_L1_ENABLE:
362 enable = (int *)arg;
363 if (*enable) {
364 outreg(disp, GC_L1DA, par->cap_buf);
365 outreg(cap, GC_CAP_IMG_START,
366 pack(l1_cfg->sy >> 1, l1_cfg->sx));
367 outreg(cap, GC_CAP_IMG_END,
368 pack(l1_cfg->sh, l1_cfg->sw));
369 outreg(disp, GC_L1M, GC_L1M_16 | GC_L1M_YC | GC_L1M_CS |
370 (par->l1_stride << 16));
371 outreg(disp, GC_L1WY_L1WX,
372 pack(l1_cfg->dy, l1_cfg->dx));
373 outreg(disp, GC_L1WH_L1WW,
374 pack(l1_cfg->dh - 1, l1_cfg->dw));
375 outreg(disp, GC_DLS, 1);
376 outreg(cap, GC_CAP_VCM,
377 GC_VCM_VIE | GC_VCM_CM | GC_VCM_VS_PAL);
378 outreg(disp, GC_DCM1, inreg(disp, GC_DCM1) |
379 GC_DCM1_DEN | GC_DCM1_L1E);
380 } else {
381 outreg(cap, GC_CAP_VCM,
382 inreg(cap, GC_CAP_VCM) & ~GC_VCM_VIE);
383 outreg(disp, GC_DCM1,
384 inreg(disp, GC_DCM1) & ~GC_DCM1_L1E);
385 }
386 break;
387 case MB862XX_L1_CAP_CTL:
388 enable = (int *)arg;
389 if (*enable) {
390 outreg(cap, GC_CAP_VCM,
391 inreg(cap, GC_CAP_VCM) | GC_VCM_VIE);
392 } else {
393 outreg(cap, GC_CAP_VCM,
394 inreg(cap, GC_CAP_VCM) & ~GC_VCM_VIE);
395 }
396 break;
397 default:
398 return -EINVAL;
399 }
400 return 0;
401}
402
312/* framebuffer ops */ 403/* framebuffer ops */
313static struct fb_ops mb862xxfb_ops = { 404static struct fb_ops mb862xxfb_ops = {
314 .owner = THIS_MODULE, 405 .owner = THIS_MODULE,
@@ -320,6 +411,7 @@ static struct fb_ops mb862xxfb_ops = {
320 .fb_fillrect = cfb_fillrect, 411 .fb_fillrect = cfb_fillrect,
321 .fb_copyarea = cfb_copyarea, 412 .fb_copyarea = cfb_copyarea,
322 .fb_imageblit = cfb_imageblit, 413 .fb_imageblit = cfb_imageblit,
414 .fb_ioctl = mb862xxfb_ioctl,
323}; 415};
324 416
325/* initialize fb_info data */ 417/* initialize fb_info data */
@@ -328,6 +420,7 @@ static int mb862xxfb_init_fbinfo(struct fb_info *fbi)
328 struct mb862xxfb_par *par = fbi->par; 420 struct mb862xxfb_par *par = fbi->par;
329 struct mb862xx_gc_mode *mode = par->gc_mode; 421 struct mb862xx_gc_mode *mode = par->gc_mode;
330 unsigned long reg; 422 unsigned long reg;
423 int stride;
331 424
332 fbi->fbops = &mb862xxfb_ops; 425 fbi->fbops = &mb862xxfb_ops;
333 fbi->pseudo_palette = par->pseudo_palette; 426 fbi->pseudo_palette = par->pseudo_palette;
@@ -336,7 +429,6 @@ static int mb862xxfb_init_fbinfo(struct fb_info *fbi)
336 429
337 strcpy(fbi->fix.id, DRV_NAME); 430 strcpy(fbi->fix.id, DRV_NAME);
338 fbi->fix.smem_start = (unsigned long)par->fb_base_phys; 431 fbi->fix.smem_start = (unsigned long)par->fb_base_phys;
339 fbi->fix.smem_len = par->mapped_vram;
340 fbi->fix.mmio_start = (unsigned long)par->mmio_base_phys; 432 fbi->fix.mmio_start = (unsigned long)par->mmio_base_phys;
341 fbi->fix.mmio_len = par->mmio_len; 433 fbi->fix.mmio_len = par->mmio_len;
342 fbi->fix.accel = FB_ACCEL_NONE; 434 fbi->fix.accel = FB_ACCEL_NONE;
@@ -420,6 +512,28 @@ static int mb862xxfb_init_fbinfo(struct fb_info *fbi)
420 FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; 512 FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
421 fbi->fix.line_length = (fbi->var.xres_virtual * 513 fbi->fix.line_length = (fbi->var.xres_virtual *
422 fbi->var.bits_per_pixel) / 8; 514 fbi->var.bits_per_pixel) / 8;
515 fbi->fix.smem_len = fbi->fix.line_length * fbi->var.yres_virtual;
516
517 /*
518 * reserve space for capture buffers and two cursors
519 * at the end of vram: 720x576 * 2 * 2.2 + 64x64 * 16.
520 */
521 par->cap_buf = par->mapped_vram - 0x1bd800 - 0x10000;
522 par->cap_len = 0x1bd800;
523 par->l1_cfg.sx = 0;
524 par->l1_cfg.sy = 0;
525 par->l1_cfg.sw = 720;
526 par->l1_cfg.sh = 576;
527 par->l1_cfg.dx = 0;
528 par->l1_cfg.dy = 0;
529 par->l1_cfg.dw = 720;
530 par->l1_cfg.dh = 576;
531 stride = par->l1_cfg.sw * (fbi->var.bits_per_pixel / 8);
532 par->l1_stride = stride / 64 + ((stride % 64) ? 1 : 0);
533 outreg(cap, GC_CAP_CBM, GC_CBM_OO | GC_CBM_CBST |
534 (par->l1_stride << 16));
535 outreg(cap, GC_CAP_CBOA, par->cap_buf);
536 outreg(cap, GC_CAP_CBLA, par->cap_buf + par->cap_len);
423 return 0; 537 return 0;
424} 538}
425 539
@@ -742,22 +856,38 @@ static int coralp_init(struct mb862xxfb_par *par)
742 856
743 par->refclk = GC_DISP_REFCLK_400; 857 par->refclk = GC_DISP_REFCLK_400;
744 858
859 if (par->mapped_vram >= 0x2000000) {
860 /* relocate gdc registers space */
861 writel(1, par->fb_base + MB862XX_MMIO_BASE + GC_RSW);
862 udelay(1); /* wait at least 20 bus cycles */
863 }
864
745 ver = inreg(host, GC_CID); 865 ver = inreg(host, GC_CID);
746 cn = (ver & GC_CID_CNAME_MSK) >> 8; 866 cn = (ver & GC_CID_CNAME_MSK) >> 8;
747 ver = ver & GC_CID_VERSION_MSK; 867 ver = ver & GC_CID_VERSION_MSK;
748 if (cn == 3) { 868 if (cn == 3) {
869 unsigned long reg;
870
749 dev_info(par->dev, "Fujitsu Coral-%s GDC Rev.%d found\n",\ 871 dev_info(par->dev, "Fujitsu Coral-%s GDC Rev.%d found\n",\
750 (ver == 6) ? "P" : (ver == 8) ? "PA" : "?", 872 (ver == 6) ? "P" : (ver == 8) ? "PA" : "?",
751 par->pdev->revision); 873 par->pdev->revision);
752 outreg(host, GC_CCF, GC_CCF_CGE_166 | GC_CCF_COT_133); 874 reg = inreg(disp, GC_DCM1);
753 udelay(200); 875 if (reg & GC_DCM01_DEN && reg & GC_DCM01_L0E)
754 outreg(host, GC_MMR, GC_MMR_CORALP_EVB_VAL); 876 par->pre_init = 1;
755 udelay(10); 877
878 if (!par->pre_init) {
879 outreg(host, GC_CCF, GC_CCF_CGE_166 | GC_CCF_COT_133);
880 udelay(200);
881 outreg(host, GC_MMR, GC_MMR_CORALP_EVB_VAL);
882 udelay(10);
883 }
756 /* Clear interrupt status */ 884 /* Clear interrupt status */
757 outreg(host, GC_IST, 0); 885 outreg(host, GC_IST, 0);
758 } else { 886 } else {
759 return -ENODEV; 887 return -ENODEV;
760 } 888 }
889
890 mb862xx_i2c_init(par);
761 return 0; 891 return 0;
762} 892}
763 893
@@ -899,7 +1029,13 @@ static int __devinit mb862xx_pci_probe(struct pci_dev *pdev,
899 case PCI_DEVICE_ID_FUJITSU_CORALPA: 1029 case PCI_DEVICE_ID_FUJITSU_CORALPA:
900 par->fb_base_phys = pci_resource_start(par->pdev, 0); 1030 par->fb_base_phys = pci_resource_start(par->pdev, 0);
901 par->mapped_vram = CORALP_MEM_SIZE; 1031 par->mapped_vram = CORALP_MEM_SIZE;
902 par->mmio_base_phys = par->fb_base_phys + MB862XX_MMIO_BASE; 1032 if (par->mapped_vram >= 0x2000000) {
1033 par->mmio_base_phys = par->fb_base_phys +
1034 MB862XX_MMIO_HIGH_BASE;
1035 } else {
1036 par->mmio_base_phys = par->fb_base_phys +
1037 MB862XX_MMIO_BASE;
1038 }
903 par->mmio_len = MB862XX_MMIO_SIZE; 1039 par->mmio_len = MB862XX_MMIO_SIZE;
904 par->type = BT_CORALP; 1040 par->type = BT_CORALP;
905 break; 1041 break;
@@ -1009,6 +1145,8 @@ static void __devexit mb862xx_pci_remove(struct pci_dev *pdev)
1009 outreg(host, GC_IMASK, 0); 1145 outreg(host, GC_IMASK, 0);
1010 } 1146 }
1011 1147
1148 mb862xx_i2c_exit(par);
1149
1012 device_remove_file(&pdev->dev, &dev_attr_dispregs); 1150 device_remove_file(&pdev->dev, &dev_attr_dispregs);
1013 1151
1014 pci_set_drvdata(pdev, NULL); 1152 pci_set_drvdata(pdev, NULL);
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index 529483467abf..0ccd7adf47bb 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -922,14 +922,14 @@ static int get_dss_clocks(void)
922 return PTR_ERR(dispc.dss_ick); 922 return PTR_ERR(dispc.dss_ick);
923 } 923 }
924 924
925 dispc.dss1_fck = clk_get(&dispc.fbdev->dssdev->dev, "dss1_fck"); 925 dispc.dss1_fck = clk_get(&dispc.fbdev->dssdev->dev, "fck");
926 if (IS_ERR(dispc.dss1_fck)) { 926 if (IS_ERR(dispc.dss1_fck)) {
927 dev_err(dispc.fbdev->dev, "can't get dss1_fck\n"); 927 dev_err(dispc.fbdev->dev, "can't get dss1_fck\n");
928 clk_put(dispc.dss_ick); 928 clk_put(dispc.dss_ick);
929 return PTR_ERR(dispc.dss1_fck); 929 return PTR_ERR(dispc.dss1_fck);
930 } 930 }
931 931
932 dispc.dss_54m_fck = clk_get(&dispc.fbdev->dssdev->dev, "tv_fck"); 932 dispc.dss_54m_fck = clk_get(&dispc.fbdev->dssdev->dev, "tv_clk");
933 if (IS_ERR(dispc.dss_54m_fck)) { 933 if (IS_ERR(dispc.dss_54m_fck)) {
934 dev_err(dispc.fbdev->dev, "can't get tv_fck\n"); 934 dev_err(dispc.fbdev->dev, "can't get tv_fck\n");
935 clk_put(dispc.dss_ick); 935 clk_put(dispc.dss_ick);
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index e264efd0278f..b3ddd743d8a6 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -90,7 +90,7 @@ static void omapdss_release(struct device *dev)
90 90
91/* dummy device for clocks */ 91/* dummy device for clocks */
92static struct platform_device omapdss_device = { 92static struct platform_device omapdss_device = {
93 .name = "omapdss", 93 .name = "omapdss_dss",
94 .id = -1, 94 .id = -1,
95 .dev = { 95 .dev = {
96 .release = omapdss_release, 96 .release = omapdss_release,
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
index eada9f12efc7..0c6981f1a4a3 100644
--- a/drivers/video/omap/rfbi.c
+++ b/drivers/video/omap/rfbi.c
@@ -90,7 +90,7 @@ static int rfbi_get_clocks(void)
90 return PTR_ERR(rfbi.dss_ick); 90 return PTR_ERR(rfbi.dss_ick);
91 } 91 }
92 92
93 rfbi.dss1_fck = clk_get(&rfbi.fbdev->dssdev->dev, "dss1_fck"); 93 rfbi.dss1_fck = clk_get(&rfbi.fbdev->dssdev->dev, "fck");
94 if (IS_ERR(rfbi.dss1_fck)) { 94 if (IS_ERR(rfbi.dss1_fck)) {
95 dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n"); 95 dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n");
96 clk_put(rfbi.dss_ick); 96 clk_put(rfbi.dss_ick);
diff --git a/drivers/video/omap2/Makefile b/drivers/video/omap2/Makefile
index d853d05dad31..5ddef129f798 100644
--- a/drivers/video/omap2/Makefile
+++ b/drivers/video/omap2/Makefile
@@ -1,6 +1,6 @@
1obj-$(CONFIG_OMAP2_VRAM) += vram.o 1obj-$(CONFIG_OMAP2_VRAM) += vram.o
2obj-$(CONFIG_OMAP2_VRFB) += vrfb.o 2obj-$(CONFIG_OMAP2_VRFB) += vrfb.o
3 3
4obj-y += dss/ 4obj-$(CONFIG_OMAP2_DSS) += dss/
5obj-y += omapfb/ 5obj-$(CONFIG_FB_OMAP2) += omapfb/
6obj-y += displays/ 6obj-y += displays/
diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig
index d18ad6b2372a..609a28073178 100644
--- a/drivers/video/omap2/displays/Kconfig
+++ b/drivers/video/omap2/displays/Kconfig
@@ -3,6 +3,7 @@ menu "OMAP2/3 Display Device Drivers"
3 3
4config PANEL_GENERIC_DPI 4config PANEL_GENERIC_DPI
5 tristate "Generic DPI Panel" 5 tristate "Generic DPI Panel"
6 depends on OMAP2_DSS_DPI
6 help 7 help
7 Generic DPI panel driver. 8 Generic DPI panel driver.
8 Supports DVI output for Beagle and OMAP3 SDP. 9 Supports DVI output for Beagle and OMAP3 SDP.
@@ -11,20 +12,20 @@ config PANEL_GENERIC_DPI
11 12
12config PANEL_LGPHILIPS_LB035Q02 13config PANEL_LGPHILIPS_LB035Q02
13 tristate "LG.Philips LB035Q02 LCD Panel" 14 tristate "LG.Philips LB035Q02 LCD Panel"
14 depends on OMAP2_DSS && SPI 15 depends on OMAP2_DSS_DPI && SPI
15 help 16 help
16 LCD Panel used on the Gumstix Overo Palo35 17 LCD Panel used on the Gumstix Overo Palo35
17 18
18config PANEL_SHARP_LS037V7DW01 19config PANEL_SHARP_LS037V7DW01
19 tristate "Sharp LS037V7DW01 LCD Panel" 20 tristate "Sharp LS037V7DW01 LCD Panel"
20 depends on OMAP2_DSS 21 depends on OMAP2_DSS_DPI
21 select BACKLIGHT_CLASS_DEVICE 22 select BACKLIGHT_CLASS_DEVICE
22 help 23 help
23 LCD Panel used in TI's SDP3430 and EVM boards 24 LCD Panel used in TI's SDP3430 and EVM boards
24 25
25config PANEL_NEC_NL8048HL11_01B 26config PANEL_NEC_NL8048HL11_01B
26 tristate "NEC NL8048HL11-01B Panel" 27 tristate "NEC NL8048HL11-01B Panel"
27 depends on OMAP2_DSS 28 depends on OMAP2_DSS_DPI
28 help 29 help
29 This NEC NL8048HL11-01B panel is TFT LCD 30 This NEC NL8048HL11-01B panel is TFT LCD
30 used in the Zoom2/3/3630 sdp boards. 31 used in the Zoom2/3/3630 sdp boards.
@@ -37,7 +38,7 @@ config PANEL_TAAL
37 38
38config PANEL_TPO_TD043MTEA1 39config PANEL_TPO_TD043MTEA1
39 tristate "TPO TD043MTEA1 LCD Panel" 40 tristate "TPO TD043MTEA1 LCD Panel"
40 depends on OMAP2_DSS && SPI 41 depends on OMAP2_DSS_DPI && SPI
41 help 42 help
42 LCD Panel used in OMAP3 Pandora 43 LCD Panel used in OMAP3 Pandora
43 44
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
index 7e04c921aa2a..dbd59b8e5b36 100644
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -30,7 +30,7 @@
30#include <linux/backlight.h> 30#include <linux/backlight.h>
31#include <linux/fb.h> 31#include <linux/fb.h>
32 32
33#include <plat/display.h> 33#include <video/omapdss.h>
34 34
35#define MIPID_CMD_READ_DISP_ID 0x04 35#define MIPID_CMD_READ_DISP_ID 0x04
36#define MIPID_CMD_READ_RED 0x06 36#define MIPID_CMD_READ_RED 0x06
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
index 4a9b9ff59467..9c90f75653fb 100644
--- a/drivers/video/omap2/displays/panel-generic-dpi.c
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -33,8 +33,9 @@
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/delay.h> 34#include <linux/delay.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <video/omapdss.h>
36 37
37#include <plat/panel-generic-dpi.h> 38#include <video/omap-panel-generic-dpi.h>
38 39
39struct panel_config { 40struct panel_config {
40 struct omap_video_timings timings; 41 struct omap_video_timings timings;
@@ -181,6 +182,56 @@ static struct panel_config generic_dpi_panels[] = {
181 .power_off_delay = 0, 182 .power_off_delay = 0,
182 .name = "samsung_lte430wq_f0c", 183 .name = "samsung_lte430wq_f0c",
183 }, 184 },
185
186 /* Seiko 70WVW1TZ3Z3 */
187 {
188 {
189 .x_res = 800,
190 .y_res = 480,
191
192 .pixel_clock = 33000,
193
194 .hsw = 128,
195 .hfp = 10,
196 .hbp = 10,
197
198 .vsw = 2,
199 .vfp = 4,
200 .vbp = 11,
201 },
202 .acbi = 0x0,
203 .acb = 0x0,
204 .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
205 OMAP_DSS_LCD_IHS,
206 .power_on_delay = 0,
207 .power_off_delay = 0,
208 .name = "seiko_70wvw1tz3",
209 },
210
211 /* Powertip PH480272T */
212 {
213 {
214 .x_res = 480,
215 .y_res = 272,
216
217 .pixel_clock = 9000,
218
219 .hsw = 40,
220 .hfp = 2,
221 .hbp = 2,
222
223 .vsw = 10,
224 .vfp = 2,
225 .vbp = 2,
226 },
227 .acbi = 0x0,
228 .acb = 0x0,
229 .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
230 OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO,
231 .power_on_delay = 0,
232 .power_off_delay = 0,
233 .name = "powertip_ph480272t",
234 },
184}; 235};
185 236
186struct panel_drv_data { 237struct panel_drv_data {
@@ -285,7 +336,7 @@ static int generic_dpi_panel_probe(struct omap_dss_device *dssdev)
285 return 0; 336 return 0;
286} 337}
287 338
288static void generic_dpi_panel_remove(struct omap_dss_device *dssdev) 339static void __exit generic_dpi_panel_remove(struct omap_dss_device *dssdev)
289{ 340{
290 struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev); 341 struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
291 342
@@ -358,7 +409,7 @@ static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev,
358 409
359static struct omap_dss_driver dpi_driver = { 410static struct omap_dss_driver dpi_driver = {
360 .probe = generic_dpi_panel_probe, 411 .probe = generic_dpi_panel_probe,
361 .remove = generic_dpi_panel_remove, 412 .remove = __exit_p(generic_dpi_panel_remove),
362 413
363 .enable = generic_dpi_panel_enable, 414 .enable = generic_dpi_panel_enable,
364 .disable = generic_dpi_panel_disable, 415 .disable = generic_dpi_panel_disable,
diff --git a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
index 271324db2436..e0eb35be303e 100644
--- a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
@@ -21,7 +21,7 @@
21#include <linux/spi/spi.h> 21#include <linux/spi/spi.h>
22#include <linux/mutex.h> 22#include <linux/mutex.h>
23 23
24#include <plat/display.h> 24#include <video/omapdss.h>
25 25
26struct lb035q02_data { 26struct lb035q02_data {
27 struct mutex lock; 27 struct mutex lock;
diff --git a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
index 925e0fadff54..2ba9d0ca187c 100644
--- a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
+++ b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
@@ -22,7 +22,7 @@
22#include <linux/backlight.h> 22#include <linux/backlight.h>
23#include <linux/fb.h> 23#include <linux/fb.h>
24 24
25#include <plat/display.h> 25#include <video/omapdss.h>
26 26
27#define LCD_XRES 800 27#define LCD_XRES 800
28#define LCD_YRES 480 28#define LCD_YRES 480
diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
index d2b35d2df2a6..ba38b3ad17d6 100644
--- a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
@@ -25,7 +25,7 @@
25#include <linux/err.h> 25#include <linux/err.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27 27
28#include <plat/display.h> 28#include <video/omapdss.h>
29 29
30struct sharp_data { 30struct sharp_data {
31 struct backlight_device *bl; 31 struct backlight_device *bl;
@@ -120,7 +120,7 @@ static int sharp_ls_panel_probe(struct omap_dss_device *dssdev)
120 return 0; 120 return 0;
121} 121}
122 122
123static void sharp_ls_panel_remove(struct omap_dss_device *dssdev) 123static void __exit sharp_ls_panel_remove(struct omap_dss_device *dssdev)
124{ 124{
125 struct sharp_data *sd = dev_get_drvdata(&dssdev->dev); 125 struct sharp_data *sd = dev_get_drvdata(&dssdev->dev);
126 struct backlight_device *bl = sd->bl; 126 struct backlight_device *bl = sd->bl;
@@ -205,7 +205,7 @@ static int sharp_ls_panel_resume(struct omap_dss_device *dssdev)
205 205
206static struct omap_dss_driver sharp_ls_driver = { 206static struct omap_dss_driver sharp_ls_driver = {
207 .probe = sharp_ls_panel_probe, 207 .probe = sharp_ls_panel_probe,
208 .remove = sharp_ls_panel_remove, 208 .remove = __exit_p(sharp_ls_panel_remove),
209 209
210 .enable = sharp_ls_panel_enable, 210 .enable = sharp_ls_panel_enable,
211 .disable = sharp_ls_panel_disable, 211 .disable = sharp_ls_panel_disable,
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index adc9900458e1..fdd5d4ae437d 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -33,8 +33,8 @@
33#include <linux/regulator/consumer.h> 33#include <linux/regulator/consumer.h>
34#include <linux/mutex.h> 34#include <linux/mutex.h>
35 35
36#include <plat/display.h> 36#include <video/omapdss.h>
37#include <plat/nokia-dsi-panel.h> 37#include <video/omap-panel-nokia-dsi.h>
38 38
39/* DSI Virtual channel. Hardcoded for now. */ 39/* DSI Virtual channel. Hardcoded for now. */
40#define TCH 0 40#define TCH 0
@@ -63,12 +63,12 @@
63#define DCS_GET_ID2 0xdb 63#define DCS_GET_ID2 0xdb
64#define DCS_GET_ID3 0xdc 64#define DCS_GET_ID3 0xdc
65 65
66#define TAAL_ESD_CHECK_PERIOD msecs_to_jiffies(5000)
67
68static irqreturn_t taal_te_isr(int irq, void *data); 66static irqreturn_t taal_te_isr(int irq, void *data);
69static void taal_te_timeout_work_callback(struct work_struct *work); 67static void taal_te_timeout_work_callback(struct work_struct *work);
70static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable); 68static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable);
71 69
70static int taal_panel_reset(struct omap_dss_device *dssdev);
71
72struct panel_regulator { 72struct panel_regulator {
73 struct regulator *regulator; 73 struct regulator *regulator;
74 const char *name; 74 const char *name;
@@ -229,8 +229,14 @@ struct taal_data {
229 229
230 bool intro_printed; 230 bool intro_printed;
231 231
232 struct workqueue_struct *esd_wq; 232 struct workqueue_struct *workqueue;
233
233 struct delayed_work esd_work; 234 struct delayed_work esd_work;
235 unsigned esd_interval;
236
237 bool ulps_enabled;
238 unsigned ulps_timeout;
239 struct delayed_work ulps_work;
234 240
235 struct panel_config *panel_config; 241 struct panel_config *panel_config;
236}; 242};
@@ -242,6 +248,7 @@ static inline struct nokia_dsi_panel_data
242} 248}
243 249
244static void taal_esd_work(struct work_struct *work); 250static void taal_esd_work(struct work_struct *work);
251static void taal_ulps_work(struct work_struct *work);
245 252
246static void hw_guard_start(struct taal_data *td, int guard_msec) 253static void hw_guard_start(struct taal_data *td, int guard_msec)
247{ 254{
@@ -264,7 +271,7 @@ static int taal_dcs_read_1(struct taal_data *td, u8 dcs_cmd, u8 *data)
264 int r; 271 int r;
265 u8 buf[1]; 272 u8 buf[1];
266 273
267 r = dsi_vc_dcs_read(td->channel, dcs_cmd, buf, 1); 274 r = dsi_vc_dcs_read(td->dssdev, td->channel, dcs_cmd, buf, 1);
268 275
269 if (r < 0) 276 if (r < 0)
270 return r; 277 return r;
@@ -276,7 +283,7 @@ static int taal_dcs_read_1(struct taal_data *td, u8 dcs_cmd, u8 *data)
276 283
277static int taal_dcs_write_0(struct taal_data *td, u8 dcs_cmd) 284static int taal_dcs_write_0(struct taal_data *td, u8 dcs_cmd)
278{ 285{
279 return dsi_vc_dcs_write(td->channel, &dcs_cmd, 1); 286 return dsi_vc_dcs_write(td->dssdev, td->channel, &dcs_cmd, 1);
280} 287}
281 288
282static int taal_dcs_write_1(struct taal_data *td, u8 dcs_cmd, u8 param) 289static int taal_dcs_write_1(struct taal_data *td, u8 dcs_cmd, u8 param)
@@ -284,7 +291,7 @@ static int taal_dcs_write_1(struct taal_data *td, u8 dcs_cmd, u8 param)
284 u8 buf[2]; 291 u8 buf[2];
285 buf[0] = dcs_cmd; 292 buf[0] = dcs_cmd;
286 buf[1] = param; 293 buf[1] = param;
287 return dsi_vc_dcs_write(td->channel, buf, 2); 294 return dsi_vc_dcs_write(td->dssdev, td->channel, buf, 2);
288} 295}
289 296
290static int taal_sleep_in(struct taal_data *td) 297static int taal_sleep_in(struct taal_data *td)
@@ -296,7 +303,7 @@ static int taal_sleep_in(struct taal_data *td)
296 hw_guard_wait(td); 303 hw_guard_wait(td);
297 304
298 cmd = DCS_SLEEP_IN; 305 cmd = DCS_SLEEP_IN;
299 r = dsi_vc_dcs_write_nosync(td->channel, &cmd, 1); 306 r = dsi_vc_dcs_write_nosync(td->dssdev, td->channel, &cmd, 1);
300 if (r) 307 if (r)
301 return r; 308 return r;
302 309
@@ -402,7 +409,7 @@ static int taal_set_update_window(struct taal_data *td,
402 buf[3] = (x2 >> 8) & 0xff; 409 buf[3] = (x2 >> 8) & 0xff;
403 buf[4] = (x2 >> 0) & 0xff; 410 buf[4] = (x2 >> 0) & 0xff;
404 411
405 r = dsi_vc_dcs_write_nosync(td->channel, buf, sizeof(buf)); 412 r = dsi_vc_dcs_write_nosync(td->dssdev, td->channel, buf, sizeof(buf));
406 if (r) 413 if (r)
407 return r; 414 return r;
408 415
@@ -412,15 +419,132 @@ static int taal_set_update_window(struct taal_data *td,
412 buf[3] = (y2 >> 8) & 0xff; 419 buf[3] = (y2 >> 8) & 0xff;
413 buf[4] = (y2 >> 0) & 0xff; 420 buf[4] = (y2 >> 0) & 0xff;
414 421
415 r = dsi_vc_dcs_write_nosync(td->channel, buf, sizeof(buf)); 422 r = dsi_vc_dcs_write_nosync(td->dssdev, td->channel, buf, sizeof(buf));
416 if (r) 423 if (r)
417 return r; 424 return r;
418 425
419 dsi_vc_send_bta_sync(td->channel); 426 dsi_vc_send_bta_sync(td->dssdev, td->channel);
420 427
421 return r; 428 return r;
422} 429}
423 430
431static void taal_queue_esd_work(struct omap_dss_device *dssdev)
432{
433 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
434
435 if (td->esd_interval > 0)
436 queue_delayed_work(td->workqueue, &td->esd_work,
437 msecs_to_jiffies(td->esd_interval));
438}
439
440static void taal_cancel_esd_work(struct omap_dss_device *dssdev)
441{
442 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
443
444 cancel_delayed_work(&td->esd_work);
445}
446
447static void taal_queue_ulps_work(struct omap_dss_device *dssdev)
448{
449 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
450
451 if (td->ulps_timeout > 0)
452 queue_delayed_work(td->workqueue, &td->ulps_work,
453 msecs_to_jiffies(td->ulps_timeout));
454}
455
456static void taal_cancel_ulps_work(struct omap_dss_device *dssdev)
457{
458 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
459
460 cancel_delayed_work(&td->ulps_work);
461}
462
463static int taal_enter_ulps(struct omap_dss_device *dssdev)
464{
465 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
466 struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
467 int r;
468
469 if (td->ulps_enabled)
470 return 0;
471
472 taal_cancel_ulps_work(dssdev);
473
474 r = _taal_enable_te(dssdev, false);
475 if (r)
476 goto err;
477
478 disable_irq(gpio_to_irq(panel_data->ext_te_gpio));
479
480 omapdss_dsi_display_disable(dssdev, false, true);
481
482 td->ulps_enabled = true;
483
484 return 0;
485
486err:
487 dev_err(&dssdev->dev, "enter ULPS failed");
488 taal_panel_reset(dssdev);
489
490 td->ulps_enabled = false;
491
492 taal_queue_ulps_work(dssdev);
493
494 return r;
495}
496
497static int taal_exit_ulps(struct omap_dss_device *dssdev)
498{
499 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
500 struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
501 int r;
502
503 if (!td->ulps_enabled)
504 return 0;
505
506 r = omapdss_dsi_display_enable(dssdev);
507 if (r)
508 goto err;
509
510 omapdss_dsi_vc_enable_hs(dssdev, td->channel, true);
511
512 r = _taal_enable_te(dssdev, true);
513 if (r)
514 goto err;
515
516 enable_irq(gpio_to_irq(panel_data->ext_te_gpio));
517
518 taal_queue_ulps_work(dssdev);
519
520 td->ulps_enabled = false;
521
522 return 0;
523
524err:
525 dev_err(&dssdev->dev, "exit ULPS failed");
526 r = taal_panel_reset(dssdev);
527
528 enable_irq(gpio_to_irq(panel_data->ext_te_gpio));
529 td->ulps_enabled = false;
530
531 taal_queue_ulps_work(dssdev);
532
533 return r;
534}
535
536static int taal_wake_up(struct omap_dss_device *dssdev)
537{
538 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
539
540 if (td->ulps_enabled)
541 return taal_exit_ulps(dssdev);
542
543 taal_cancel_ulps_work(dssdev);
544 taal_queue_ulps_work(dssdev);
545 return 0;
546}
547
424static int taal_bl_update_status(struct backlight_device *dev) 548static int taal_bl_update_status(struct backlight_device *dev)
425{ 549{
426 struct omap_dss_device *dssdev = dev_get_drvdata(&dev->dev); 550 struct omap_dss_device *dssdev = dev_get_drvdata(&dev->dev);
@@ -441,9 +565,13 @@ static int taal_bl_update_status(struct backlight_device *dev)
441 565
442 if (td->use_dsi_bl) { 566 if (td->use_dsi_bl) {
443 if (td->enabled) { 567 if (td->enabled) {
444 dsi_bus_lock(); 568 dsi_bus_lock(dssdev);
445 r = taal_dcs_write_1(td, DCS_BRIGHTNESS, level); 569
446 dsi_bus_unlock(); 570 r = taal_wake_up(dssdev);
571 if (!r)
572 r = taal_dcs_write_1(td, DCS_BRIGHTNESS, level);
573
574 dsi_bus_unlock(dssdev);
447 } else { 575 } else {
448 r = 0; 576 r = 0;
449 } 577 }
@@ -504,9 +632,13 @@ static ssize_t taal_num_errors_show(struct device *dev,
504 mutex_lock(&td->lock); 632 mutex_lock(&td->lock);
505 633
506 if (td->enabled) { 634 if (td->enabled) {
507 dsi_bus_lock(); 635 dsi_bus_lock(dssdev);
508 r = taal_dcs_read_1(td, DCS_READ_NUM_ERRORS, &errors); 636
509 dsi_bus_unlock(); 637 r = taal_wake_up(dssdev);
638 if (!r)
639 r = taal_dcs_read_1(td, DCS_READ_NUM_ERRORS, &errors);
640
641 dsi_bus_unlock(dssdev);
510 } else { 642 } else {
511 r = -ENODEV; 643 r = -ENODEV;
512 } 644 }
@@ -530,9 +662,13 @@ static ssize_t taal_hw_revision_show(struct device *dev,
530 mutex_lock(&td->lock); 662 mutex_lock(&td->lock);
531 663
532 if (td->enabled) { 664 if (td->enabled) {
533 dsi_bus_lock(); 665 dsi_bus_lock(dssdev);
534 r = taal_get_id(td, &id1, &id2, &id3); 666
535 dsi_bus_unlock(); 667 r = taal_wake_up(dssdev);
668 if (!r)
669 r = taal_get_id(td, &id1, &id2, &id3);
670
671 dsi_bus_unlock(dssdev);
536 } else { 672 } else {
537 r = -ENODEV; 673 r = -ENODEV;
538 } 674 }
@@ -579,6 +715,7 @@ static ssize_t store_cabc_mode(struct device *dev,
579 struct omap_dss_device *dssdev = to_dss_device(dev); 715 struct omap_dss_device *dssdev = to_dss_device(dev);
580 struct taal_data *td = dev_get_drvdata(&dssdev->dev); 716 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
581 int i; 717 int i;
718 int r;
582 719
583 for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) { 720 for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) {
584 if (sysfs_streq(cabc_modes[i], buf)) 721 if (sysfs_streq(cabc_modes[i], buf))
@@ -591,10 +728,19 @@ static ssize_t store_cabc_mode(struct device *dev,
591 mutex_lock(&td->lock); 728 mutex_lock(&td->lock);
592 729
593 if (td->enabled) { 730 if (td->enabled) {
594 dsi_bus_lock(); 731 dsi_bus_lock(dssdev);
595 if (!td->cabc_broken) 732
596 taal_dcs_write_1(td, DCS_WRITE_CABC, i); 733 if (!td->cabc_broken) {
597 dsi_bus_unlock(); 734 r = taal_wake_up(dssdev);
735 if (r)
736 goto err;
737
738 r = taal_dcs_write_1(td, DCS_WRITE_CABC, i);
739 if (r)
740 goto err;
741 }
742
743 dsi_bus_unlock(dssdev);
598 } 744 }
599 745
600 td->cabc_mode = i; 746 td->cabc_mode = i;
@@ -602,6 +748,10 @@ static ssize_t store_cabc_mode(struct device *dev,
602 mutex_unlock(&td->lock); 748 mutex_unlock(&td->lock);
603 749
604 return count; 750 return count;
751err:
752 dsi_bus_unlock(dssdev);
753 mutex_unlock(&td->lock);
754 return r;
605} 755}
606 756
607static ssize_t show_cabc_available_modes(struct device *dev, 757static ssize_t show_cabc_available_modes(struct device *dev,
@@ -620,18 +770,161 @@ static ssize_t show_cabc_available_modes(struct device *dev,
620 return len < PAGE_SIZE ? len : PAGE_SIZE - 1; 770 return len < PAGE_SIZE ? len : PAGE_SIZE - 1;
621} 771}
622 772
773static ssize_t taal_store_esd_interval(struct device *dev,
774 struct device_attribute *attr,
775 const char *buf, size_t count)
776{
777 struct omap_dss_device *dssdev = to_dss_device(dev);
778 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
779
780 unsigned long t;
781 int r;
782
783 r = strict_strtoul(buf, 10, &t);
784 if (r)
785 return r;
786
787 mutex_lock(&td->lock);
788 taal_cancel_esd_work(dssdev);
789 td->esd_interval = t;
790 if (td->enabled)
791 taal_queue_esd_work(dssdev);
792 mutex_unlock(&td->lock);
793
794 return count;
795}
796
797static ssize_t taal_show_esd_interval(struct device *dev,
798 struct device_attribute *attr,
799 char *buf)
800{
801 struct omap_dss_device *dssdev = to_dss_device(dev);
802 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
803 unsigned t;
804
805 mutex_lock(&td->lock);
806 t = td->esd_interval;
807 mutex_unlock(&td->lock);
808
809 return snprintf(buf, PAGE_SIZE, "%u\n", t);
810}
811
812static ssize_t taal_store_ulps(struct device *dev,
813 struct device_attribute *attr,
814 const char *buf, size_t count)
815{
816 struct omap_dss_device *dssdev = to_dss_device(dev);
817 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
818 unsigned long t;
819 int r;
820
821 r = strict_strtoul(buf, 10, &t);
822 if (r)
823 return r;
824
825 mutex_lock(&td->lock);
826
827 if (td->enabled) {
828 dsi_bus_lock(dssdev);
829
830 if (t)
831 r = taal_enter_ulps(dssdev);
832 else
833 r = taal_wake_up(dssdev);
834
835 dsi_bus_unlock(dssdev);
836 }
837
838 mutex_unlock(&td->lock);
839
840 if (r)
841 return r;
842
843 return count;
844}
845
846static ssize_t taal_show_ulps(struct device *dev,
847 struct device_attribute *attr,
848 char *buf)
849{
850 struct omap_dss_device *dssdev = to_dss_device(dev);
851 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
852 unsigned t;
853
854 mutex_lock(&td->lock);
855 t = td->ulps_enabled;
856 mutex_unlock(&td->lock);
857
858 return snprintf(buf, PAGE_SIZE, "%u\n", t);
859}
860
861static ssize_t taal_store_ulps_timeout(struct device *dev,
862 struct device_attribute *attr,
863 const char *buf, size_t count)
864{
865 struct omap_dss_device *dssdev = to_dss_device(dev);
866 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
867 unsigned long t;
868 int r;
869
870 r = strict_strtoul(buf, 10, &t);
871 if (r)
872 return r;
873
874 mutex_lock(&td->lock);
875 td->ulps_timeout = t;
876
877 if (td->enabled) {
878 /* taal_wake_up will restart the timer */
879 dsi_bus_lock(dssdev);
880 r = taal_wake_up(dssdev);
881 dsi_bus_unlock(dssdev);
882 }
883
884 mutex_unlock(&td->lock);
885
886 if (r)
887 return r;
888
889 return count;
890}
891
892static ssize_t taal_show_ulps_timeout(struct device *dev,
893 struct device_attribute *attr,
894 char *buf)
895{
896 struct omap_dss_device *dssdev = to_dss_device(dev);
897 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
898 unsigned t;
899
900 mutex_lock(&td->lock);
901 t = td->ulps_timeout;
902 mutex_unlock(&td->lock);
903
904 return snprintf(buf, PAGE_SIZE, "%u\n", t);
905}
906
623static DEVICE_ATTR(num_dsi_errors, S_IRUGO, taal_num_errors_show, NULL); 907static DEVICE_ATTR(num_dsi_errors, S_IRUGO, taal_num_errors_show, NULL);
624static DEVICE_ATTR(hw_revision, S_IRUGO, taal_hw_revision_show, NULL); 908static DEVICE_ATTR(hw_revision, S_IRUGO, taal_hw_revision_show, NULL);
625static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR, 909static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR,
626 show_cabc_mode, store_cabc_mode); 910 show_cabc_mode, store_cabc_mode);
627static DEVICE_ATTR(cabc_available_modes, S_IRUGO, 911static DEVICE_ATTR(cabc_available_modes, S_IRUGO,
628 show_cabc_available_modes, NULL); 912 show_cabc_available_modes, NULL);
913static DEVICE_ATTR(esd_interval, S_IRUGO | S_IWUSR,
914 taal_show_esd_interval, taal_store_esd_interval);
915static DEVICE_ATTR(ulps, S_IRUGO | S_IWUSR,
916 taal_show_ulps, taal_store_ulps);
917static DEVICE_ATTR(ulps_timeout, S_IRUGO | S_IWUSR,
918 taal_show_ulps_timeout, taal_store_ulps_timeout);
629 919
630static struct attribute *taal_attrs[] = { 920static struct attribute *taal_attrs[] = {
631 &dev_attr_num_dsi_errors.attr, 921 &dev_attr_num_dsi_errors.attr,
632 &dev_attr_hw_revision.attr, 922 &dev_attr_hw_revision.attr,
633 &dev_attr_cabc_mode.attr, 923 &dev_attr_cabc_mode.attr,
634 &dev_attr_cabc_available_modes.attr, 924 &dev_attr_cabc_available_modes.attr,
925 &dev_attr_esd_interval.attr,
926 &dev_attr_ulps.attr,
927 &dev_attr_ulps_timeout.attr,
635 NULL, 928 NULL,
636}; 929};
637 930
@@ -700,6 +993,9 @@ static int taal_probe(struct omap_dss_device *dssdev)
700 } 993 }
701 td->dssdev = dssdev; 994 td->dssdev = dssdev;
702 td->panel_config = panel_config; 995 td->panel_config = panel_config;
996 td->esd_interval = panel_data->esd_interval;
997 td->ulps_enabled = false;
998 td->ulps_timeout = panel_data->ulps_timeout;
703 999
704 mutex_init(&td->lock); 1000 mutex_init(&td->lock);
705 1001
@@ -710,13 +1006,14 @@ static int taal_probe(struct omap_dss_device *dssdev)
710 if (r) 1006 if (r)
711 goto err_reg; 1007 goto err_reg;
712 1008
713 td->esd_wq = create_singlethread_workqueue("taal_esd"); 1009 td->workqueue = create_singlethread_workqueue("taal_esd");
714 if (td->esd_wq == NULL) { 1010 if (td->workqueue == NULL) {
715 dev_err(&dssdev->dev, "can't create ESD workqueue\n"); 1011 dev_err(&dssdev->dev, "can't create ESD workqueue\n");
716 r = -ENOMEM; 1012 r = -ENOMEM;
717 goto err_wq; 1013 goto err_wq;
718 } 1014 }
719 INIT_DELAYED_WORK_DEFERRABLE(&td->esd_work, taal_esd_work); 1015 INIT_DELAYED_WORK_DEFERRABLE(&td->esd_work, taal_esd_work);
1016 INIT_DELAYED_WORK(&td->ulps_work, taal_ulps_work);
720 1017
721 dev_set_drvdata(&dssdev->dev, td); 1018 dev_set_drvdata(&dssdev->dev, td);
722 1019
@@ -734,8 +1031,8 @@ static int taal_probe(struct omap_dss_device *dssdev)
734 props.max_brightness = 127; 1031 props.max_brightness = 127;
735 1032
736 props.type = BACKLIGHT_RAW; 1033 props.type = BACKLIGHT_RAW;
737 bldev = backlight_device_register("taal", &dssdev->dev, dssdev, 1034 bldev = backlight_device_register(dev_name(&dssdev->dev), &dssdev->dev,
738 &taal_bl_ops, &props); 1035 dssdev, &taal_bl_ops, &props);
739 if (IS_ERR(bldev)) { 1036 if (IS_ERR(bldev)) {
740 r = PTR_ERR(bldev); 1037 r = PTR_ERR(bldev);
741 goto err_bl; 1038 goto err_bl;
@@ -810,7 +1107,7 @@ err_irq:
810err_gpio: 1107err_gpio:
811 backlight_device_unregister(bldev); 1108 backlight_device_unregister(bldev);
812err_bl: 1109err_bl:
813 destroy_workqueue(td->esd_wq); 1110 destroy_workqueue(td->workqueue);
814err_wq: 1111err_wq:
815 free_regulators(panel_config->regulators, panel_config->num_regulators); 1112 free_regulators(panel_config->regulators, panel_config->num_regulators);
816err_reg: 1113err_reg:
@@ -819,7 +1116,7 @@ err:
819 return r; 1116 return r;
820} 1117}
821 1118
822static void taal_remove(struct omap_dss_device *dssdev) 1119static void __exit taal_remove(struct omap_dss_device *dssdev)
823{ 1120{
824 struct taal_data *td = dev_get_drvdata(&dssdev->dev); 1121 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
825 struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); 1122 struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
@@ -841,8 +1138,9 @@ static void taal_remove(struct omap_dss_device *dssdev)
841 taal_bl_update_status(bldev); 1138 taal_bl_update_status(bldev);
842 backlight_device_unregister(bldev); 1139 backlight_device_unregister(bldev);
843 1140
844 cancel_delayed_work(&td->esd_work); 1141 taal_cancel_ulps_work(dssdev);
845 destroy_workqueue(td->esd_wq); 1142 taal_cancel_esd_work(dssdev);
1143 destroy_workqueue(td->workqueue);
846 1144
847 /* reset, to be sure that the panel is in a valid state */ 1145 /* reset, to be sure that the panel is in a valid state */
848 taal_hw_reset(dssdev); 1146 taal_hw_reset(dssdev);
@@ -867,7 +1165,7 @@ static int taal_power_on(struct omap_dss_device *dssdev)
867 1165
868 taal_hw_reset(dssdev); 1166 taal_hw_reset(dssdev);
869 1167
870 omapdss_dsi_vc_enable_hs(td->channel, false); 1168 omapdss_dsi_vc_enable_hs(dssdev, td->channel, false);
871 1169
872 r = taal_sleep_out(td); 1170 r = taal_sleep_out(td);
873 if (r) 1171 if (r)
@@ -924,7 +1222,7 @@ static int taal_power_on(struct omap_dss_device *dssdev)
924 td->intro_printed = true; 1222 td->intro_printed = true;
925 } 1223 }
926 1224
927 omapdss_dsi_vc_enable_hs(td->channel, true); 1225 omapdss_dsi_vc_enable_hs(dssdev, td->channel, true);
928 1226
929 return 0; 1227 return 0;
930err: 1228err:
@@ -932,7 +1230,7 @@ err:
932 1230
933 taal_hw_reset(dssdev); 1231 taal_hw_reset(dssdev);
934 1232
935 omapdss_dsi_display_disable(dssdev); 1233 omapdss_dsi_display_disable(dssdev, true, false);
936err0: 1234err0:
937 return r; 1235 return r;
938} 1236}
@@ -955,15 +1253,23 @@ static void taal_power_off(struct omap_dss_device *dssdev)
955 taal_hw_reset(dssdev); 1253 taal_hw_reset(dssdev);
956 } 1254 }
957 1255
958 omapdss_dsi_display_disable(dssdev); 1256 omapdss_dsi_display_disable(dssdev, true, false);
959 1257
960 td->enabled = 0; 1258 td->enabled = 0;
961} 1259}
962 1260
1261static int taal_panel_reset(struct omap_dss_device *dssdev)
1262{
1263 dev_err(&dssdev->dev, "performing LCD reset\n");
1264
1265 taal_power_off(dssdev);
1266 taal_hw_reset(dssdev);
1267 return taal_power_on(dssdev);
1268}
1269
963static int taal_enable(struct omap_dss_device *dssdev) 1270static int taal_enable(struct omap_dss_device *dssdev)
964{ 1271{
965 struct taal_data *td = dev_get_drvdata(&dssdev->dev); 1272 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
966 struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
967 int r; 1273 int r;
968 1274
969 dev_dbg(&dssdev->dev, "enable\n"); 1275 dev_dbg(&dssdev->dev, "enable\n");
@@ -975,18 +1281,16 @@ static int taal_enable(struct omap_dss_device *dssdev)
975 goto err; 1281 goto err;
976 } 1282 }
977 1283
978 dsi_bus_lock(); 1284 dsi_bus_lock(dssdev);
979 1285
980 r = taal_power_on(dssdev); 1286 r = taal_power_on(dssdev);
981 1287
982 dsi_bus_unlock(); 1288 dsi_bus_unlock(dssdev);
983 1289
984 if (r) 1290 if (r)
985 goto err; 1291 goto err;
986 1292
987 if (panel_data->use_esd_check) 1293 taal_queue_esd_work(dssdev);
988 queue_delayed_work(td->esd_wq, &td->esd_work,
989 TAAL_ESD_CHECK_PERIOD);
990 1294
991 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; 1295 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
992 1296
@@ -1007,14 +1311,17 @@ static void taal_disable(struct omap_dss_device *dssdev)
1007 1311
1008 mutex_lock(&td->lock); 1312 mutex_lock(&td->lock);
1009 1313
1010 cancel_delayed_work(&td->esd_work); 1314 taal_cancel_ulps_work(dssdev);
1315 taal_cancel_esd_work(dssdev);
1011 1316
1012 dsi_bus_lock(); 1317 dsi_bus_lock(dssdev);
1013 1318
1014 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) 1319 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
1320 taal_wake_up(dssdev);
1015 taal_power_off(dssdev); 1321 taal_power_off(dssdev);
1322 }
1016 1323
1017 dsi_bus_unlock(); 1324 dsi_bus_unlock(dssdev);
1018 1325
1019 dssdev->state = OMAP_DSS_DISPLAY_DISABLED; 1326 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
1020 1327
@@ -1035,13 +1342,16 @@ static int taal_suspend(struct omap_dss_device *dssdev)
1035 goto err; 1342 goto err;
1036 } 1343 }
1037 1344
1038 cancel_delayed_work(&td->esd_work); 1345 taal_cancel_ulps_work(dssdev);
1346 taal_cancel_esd_work(dssdev);
1039 1347
1040 dsi_bus_lock(); 1348 dsi_bus_lock(dssdev);
1041 1349
1042 taal_power_off(dssdev); 1350 r = taal_wake_up(dssdev);
1351 if (!r)
1352 taal_power_off(dssdev);
1043 1353
1044 dsi_bus_unlock(); 1354 dsi_bus_unlock(dssdev);
1045 1355
1046 dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED; 1356 dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
1047 1357
@@ -1056,7 +1366,6 @@ err:
1056static int taal_resume(struct omap_dss_device *dssdev) 1366static int taal_resume(struct omap_dss_device *dssdev)
1057{ 1367{
1058 struct taal_data *td = dev_get_drvdata(&dssdev->dev); 1368 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
1059 struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev);
1060 int r; 1369 int r;
1061 1370
1062 dev_dbg(&dssdev->dev, "resume\n"); 1371 dev_dbg(&dssdev->dev, "resume\n");
@@ -1068,19 +1377,17 @@ static int taal_resume(struct omap_dss_device *dssdev)
1068 goto err; 1377 goto err;
1069 } 1378 }
1070 1379
1071 dsi_bus_lock(); 1380 dsi_bus_lock(dssdev);
1072 1381
1073 r = taal_power_on(dssdev); 1382 r = taal_power_on(dssdev);
1074 1383
1075 dsi_bus_unlock(); 1384 dsi_bus_unlock(dssdev);
1076 1385
1077 if (r) { 1386 if (r) {
1078 dssdev->state = OMAP_DSS_DISPLAY_DISABLED; 1387 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
1079 } else { 1388 } else {
1080 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; 1389 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
1081 if (panel_data->use_esd_check) 1390 taal_queue_esd_work(dssdev);
1082 queue_delayed_work(td->esd_wq, &td->esd_work,
1083 TAAL_ESD_CHECK_PERIOD);
1084 } 1391 }
1085 1392
1086 mutex_unlock(&td->lock); 1393 mutex_unlock(&td->lock);
@@ -1095,7 +1402,7 @@ static void taal_framedone_cb(int err, void *data)
1095{ 1402{
1096 struct omap_dss_device *dssdev = data; 1403 struct omap_dss_device *dssdev = data;
1097 dev_dbg(&dssdev->dev, "framedone, err %d\n", err); 1404 dev_dbg(&dssdev->dev, "framedone, err %d\n", err);
1098 dsi_bus_unlock(); 1405 dsi_bus_unlock(dssdev);
1099} 1406}
1100 1407
1101static irqreturn_t taal_te_isr(int irq, void *data) 1408static irqreturn_t taal_te_isr(int irq, void *data)
@@ -1123,7 +1430,7 @@ static irqreturn_t taal_te_isr(int irq, void *data)
1123 return IRQ_HANDLED; 1430 return IRQ_HANDLED;
1124err: 1431err:
1125 dev_err(&dssdev->dev, "start update failed\n"); 1432 dev_err(&dssdev->dev, "start update failed\n");
1126 dsi_bus_unlock(); 1433 dsi_bus_unlock(dssdev);
1127 return IRQ_HANDLED; 1434 return IRQ_HANDLED;
1128} 1435}
1129 1436
@@ -1136,7 +1443,7 @@ static void taal_te_timeout_work_callback(struct work_struct *work)
1136 dev_err(&dssdev->dev, "TE not received for 250ms!\n"); 1443 dev_err(&dssdev->dev, "TE not received for 250ms!\n");
1137 1444
1138 atomic_set(&td->do_update, 0); 1445 atomic_set(&td->do_update, 0);
1139 dsi_bus_unlock(); 1446 dsi_bus_unlock(dssdev);
1140} 1447}
1141 1448
1142static int taal_update(struct omap_dss_device *dssdev, 1449static int taal_update(struct omap_dss_device *dssdev,
@@ -1149,7 +1456,11 @@ static int taal_update(struct omap_dss_device *dssdev,
1149 dev_dbg(&dssdev->dev, "update %d, %d, %d x %d\n", x, y, w, h); 1456 dev_dbg(&dssdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
1150 1457
1151 mutex_lock(&td->lock); 1458 mutex_lock(&td->lock);
1152 dsi_bus_lock(); 1459 dsi_bus_lock(dssdev);
1460
1461 r = taal_wake_up(dssdev);
1462 if (r)
1463 goto err;
1153 1464
1154 if (!td->enabled) { 1465 if (!td->enabled) {
1155 r = 0; 1466 r = 0;
@@ -1184,7 +1495,7 @@ static int taal_update(struct omap_dss_device *dssdev,
1184 mutex_unlock(&td->lock); 1495 mutex_unlock(&td->lock);
1185 return 0; 1496 return 0;
1186err: 1497err:
1187 dsi_bus_unlock(); 1498 dsi_bus_unlock(dssdev);
1188 mutex_unlock(&td->lock); 1499 mutex_unlock(&td->lock);
1189 return r; 1500 return r;
1190} 1501}
@@ -1196,8 +1507,8 @@ static int taal_sync(struct omap_dss_device *dssdev)
1196 dev_dbg(&dssdev->dev, "sync\n"); 1507 dev_dbg(&dssdev->dev, "sync\n");
1197 1508
1198 mutex_lock(&td->lock); 1509 mutex_lock(&td->lock);
1199 dsi_bus_lock(); 1510 dsi_bus_lock(dssdev);
1200 dsi_bus_unlock(); 1511 dsi_bus_unlock(dssdev);
1201 mutex_unlock(&td->lock); 1512 mutex_unlock(&td->lock);
1202 1513
1203 dev_dbg(&dssdev->dev, "sync done\n"); 1514 dev_dbg(&dssdev->dev, "sync done\n");
@@ -1235,9 +1546,13 @@ static int taal_enable_te(struct omap_dss_device *dssdev, bool enable)
1235 if (td->te_enabled == enable) 1546 if (td->te_enabled == enable)
1236 goto end; 1547 goto end;
1237 1548
1238 dsi_bus_lock(); 1549 dsi_bus_lock(dssdev);
1239 1550
1240 if (td->enabled) { 1551 if (td->enabled) {
1552 r = taal_wake_up(dssdev);
1553 if (r)
1554 goto err;
1555
1241 r = _taal_enable_te(dssdev, enable); 1556 r = _taal_enable_te(dssdev, enable);
1242 if (r) 1557 if (r)
1243 goto err; 1558 goto err;
@@ -1245,13 +1560,13 @@ static int taal_enable_te(struct omap_dss_device *dssdev, bool enable)
1245 1560
1246 td->te_enabled = enable; 1561 td->te_enabled = enable;
1247 1562
1248 dsi_bus_unlock(); 1563 dsi_bus_unlock(dssdev);
1249end: 1564end:
1250 mutex_unlock(&td->lock); 1565 mutex_unlock(&td->lock);
1251 1566
1252 return 0; 1567 return 0;
1253err: 1568err:
1254 dsi_bus_unlock(); 1569 dsi_bus_unlock(dssdev);
1255 mutex_unlock(&td->lock); 1570 mutex_unlock(&td->lock);
1256 1571
1257 return r; 1572 return r;
@@ -1281,9 +1596,13 @@ static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
1281 if (td->rotate == rotate) 1596 if (td->rotate == rotate)
1282 goto end; 1597 goto end;
1283 1598
1284 dsi_bus_lock(); 1599 dsi_bus_lock(dssdev);
1285 1600
1286 if (td->enabled) { 1601 if (td->enabled) {
1602 r = taal_wake_up(dssdev);
1603 if (r)
1604 goto err;
1605
1287 r = taal_set_addr_mode(td, rotate, td->mirror); 1606 r = taal_set_addr_mode(td, rotate, td->mirror);
1288 if (r) 1607 if (r)
1289 goto err; 1608 goto err;
@@ -1291,12 +1610,12 @@ static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
1291 1610
1292 td->rotate = rotate; 1611 td->rotate = rotate;
1293 1612
1294 dsi_bus_unlock(); 1613 dsi_bus_unlock(dssdev);
1295end: 1614end:
1296 mutex_unlock(&td->lock); 1615 mutex_unlock(&td->lock);
1297 return 0; 1616 return 0;
1298err: 1617err:
1299 dsi_bus_unlock(); 1618 dsi_bus_unlock(dssdev);
1300 mutex_unlock(&td->lock); 1619 mutex_unlock(&td->lock);
1301 return r; 1620 return r;
1302} 1621}
@@ -1325,8 +1644,12 @@ static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
1325 if (td->mirror == enable) 1644 if (td->mirror == enable)
1326 goto end; 1645 goto end;
1327 1646
1328 dsi_bus_lock(); 1647 dsi_bus_lock(dssdev);
1329 if (td->enabled) { 1648 if (td->enabled) {
1649 r = taal_wake_up(dssdev);
1650 if (r)
1651 goto err;
1652
1330 r = taal_set_addr_mode(td, td->rotate, enable); 1653 r = taal_set_addr_mode(td, td->rotate, enable);
1331 if (r) 1654 if (r)
1332 goto err; 1655 goto err;
@@ -1334,12 +1657,12 @@ static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
1334 1657
1335 td->mirror = enable; 1658 td->mirror = enable;
1336 1659
1337 dsi_bus_unlock(); 1660 dsi_bus_unlock(dssdev);
1338end: 1661end:
1339 mutex_unlock(&td->lock); 1662 mutex_unlock(&td->lock);
1340 return 0; 1663 return 0;
1341err: 1664err:
1342 dsi_bus_unlock(); 1665 dsi_bus_unlock(dssdev);
1343 mutex_unlock(&td->lock); 1666 mutex_unlock(&td->lock);
1344 return r; 1667 return r;
1345} 1668}
@@ -1369,7 +1692,11 @@ static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
1369 goto err1; 1692 goto err1;
1370 } 1693 }
1371 1694
1372 dsi_bus_lock(); 1695 dsi_bus_lock(dssdev);
1696
1697 r = taal_wake_up(dssdev);
1698 if (r)
1699 goto err2;
1373 1700
1374 r = taal_dcs_read_1(td, DCS_GET_ID1, &id1); 1701 r = taal_dcs_read_1(td, DCS_GET_ID1, &id1);
1375 if (r) 1702 if (r)
@@ -1381,11 +1708,11 @@ static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
1381 if (r) 1708 if (r)
1382 goto err2; 1709 goto err2;
1383 1710
1384 dsi_bus_unlock(); 1711 dsi_bus_unlock(dssdev);
1385 mutex_unlock(&td->lock); 1712 mutex_unlock(&td->lock);
1386 return 0; 1713 return 0;
1387err2: 1714err2:
1388 dsi_bus_unlock(); 1715 dsi_bus_unlock(dssdev);
1389err1: 1716err1:
1390 mutex_unlock(&td->lock); 1717 mutex_unlock(&td->lock);
1391 return r; 1718 return r;
@@ -1415,7 +1742,11 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
1415 dssdev->panel.timings.x_res * 1742 dssdev->panel.timings.x_res *
1416 dssdev->panel.timings.y_res * 3); 1743 dssdev->panel.timings.y_res * 3);
1417 1744
1418 dsi_bus_lock(); 1745 dsi_bus_lock(dssdev);
1746
1747 r = taal_wake_up(dssdev);
1748 if (r)
1749 goto err2;
1419 1750
1420 /* plen 1 or 2 goes into short packet. until checksum error is fixed, 1751 /* plen 1 or 2 goes into short packet. until checksum error is fixed,
1421 * use short packets. plen 32 works, but bigger packets seem to cause 1752 * use short packets. plen 32 works, but bigger packets seem to cause
@@ -1427,7 +1758,7 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
1427 1758
1428 taal_set_update_window(td, x, y, w, h); 1759 taal_set_update_window(td, x, y, w, h);
1429 1760
1430 r = dsi_vc_set_max_rx_packet_size(td->channel, plen); 1761 r = dsi_vc_set_max_rx_packet_size(dssdev, td->channel, plen);
1431 if (r) 1762 if (r)
1432 goto err2; 1763 goto err2;
1433 1764
@@ -1435,7 +1766,7 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
1435 u8 dcs_cmd = first ? 0x2e : 0x3e; 1766 u8 dcs_cmd = first ? 0x2e : 0x3e;
1436 first = 0; 1767 first = 0;
1437 1768
1438 r = dsi_vc_dcs_read(td->channel, dcs_cmd, 1769 r = dsi_vc_dcs_read(dssdev, td->channel, dcs_cmd,
1439 buf + buf_used, size - buf_used); 1770 buf + buf_used, size - buf_used);
1440 1771
1441 if (r < 0) { 1772 if (r < 0) {
@@ -1461,14 +1792,35 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
1461 r = buf_used; 1792 r = buf_used;
1462 1793
1463err3: 1794err3:
1464 dsi_vc_set_max_rx_packet_size(td->channel, 1); 1795 dsi_vc_set_max_rx_packet_size(dssdev, td->channel, 1);
1465err2: 1796err2:
1466 dsi_bus_unlock(); 1797 dsi_bus_unlock(dssdev);
1467err1: 1798err1:
1468 mutex_unlock(&td->lock); 1799 mutex_unlock(&td->lock);
1469 return r; 1800 return r;
1470} 1801}
1471 1802
1803static void taal_ulps_work(struct work_struct *work)
1804{
1805 struct taal_data *td = container_of(work, struct taal_data,
1806 ulps_work.work);
1807 struct omap_dss_device *dssdev = td->dssdev;
1808
1809 mutex_lock(&td->lock);
1810
1811 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE || !td->enabled) {
1812 mutex_unlock(&td->lock);
1813 return;
1814 }
1815
1816 dsi_bus_lock(dssdev);
1817
1818 taal_enter_ulps(dssdev);
1819
1820 dsi_bus_unlock(dssdev);
1821 mutex_unlock(&td->lock);
1822}
1823
1472static void taal_esd_work(struct work_struct *work) 1824static void taal_esd_work(struct work_struct *work)
1473{ 1825{
1474 struct taal_data *td = container_of(work, struct taal_data, 1826 struct taal_data *td = container_of(work, struct taal_data,
@@ -1485,7 +1837,13 @@ static void taal_esd_work(struct work_struct *work)
1485 return; 1837 return;
1486 } 1838 }
1487 1839
1488 dsi_bus_lock(); 1840 dsi_bus_lock(dssdev);
1841
1842 r = taal_wake_up(dssdev);
1843 if (r) {
1844 dev_err(&dssdev->dev, "failed to exit ULPS\n");
1845 goto err;
1846 }
1489 1847
1490 r = taal_dcs_read_1(td, DCS_RDDSDR, &state1); 1848 r = taal_dcs_read_1(td, DCS_RDDSDR, &state1);
1491 if (r) { 1849 if (r) {
@@ -1521,22 +1879,20 @@ static void taal_esd_work(struct work_struct *work)
1521 goto err; 1879 goto err;
1522 } 1880 }
1523 1881
1524 dsi_bus_unlock(); 1882 dsi_bus_unlock(dssdev);
1525 1883
1526 queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD); 1884 taal_queue_esd_work(dssdev);
1527 1885
1528 mutex_unlock(&td->lock); 1886 mutex_unlock(&td->lock);
1529 return; 1887 return;
1530err: 1888err:
1531 dev_err(&dssdev->dev, "performing LCD reset\n"); 1889 dev_err(&dssdev->dev, "performing LCD reset\n");
1532 1890
1533 taal_power_off(dssdev); 1891 taal_panel_reset(dssdev);
1534 taal_hw_reset(dssdev);
1535 taal_power_on(dssdev);
1536 1892
1537 dsi_bus_unlock(); 1893 dsi_bus_unlock(dssdev);
1538 1894
1539 queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD); 1895 taal_queue_esd_work(dssdev);
1540 1896
1541 mutex_unlock(&td->lock); 1897 mutex_unlock(&td->lock);
1542} 1898}
@@ -1557,7 +1913,7 @@ static enum omap_dss_update_mode taal_get_update_mode(
1557 1913
1558static struct omap_dss_driver taal_driver = { 1914static struct omap_dss_driver taal_driver = {
1559 .probe = taal_probe, 1915 .probe = taal_probe,
1560 .remove = taal_remove, 1916 .remove = __exit_p(taal_remove),
1561 1917
1562 .enable = taal_enable, 1918 .enable = taal_enable,
1563 .disable = taal_disable, 1919 .disable = taal_disable,
diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
index dbe9d43b4850..2462b9ec6662 100644
--- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
@@ -17,7 +17,7 @@
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19 19
20#include <plat/display.h> 20#include <video/omapdss.h>
21 21
22#define TPO_R02_MODE(x) ((x) & 7) 22#define TPO_R02_MODE(x) ((x) & 7)
23#define TPO_R02_MODE_800x480 7 23#define TPO_R02_MODE_800x480 7
@@ -144,13 +144,15 @@ static ssize_t tpo_td043_vmirror_store(struct device *dev,
144 struct device_attribute *attr, const char *buf, size_t count) 144 struct device_attribute *attr, const char *buf, size_t count)
145{ 145{
146 struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev); 146 struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev);
147 long val; 147 int val;
148 int ret; 148 int ret;
149 149
150 ret = strict_strtol(buf, 0, &val); 150 ret = kstrtoint(buf, 0, &val);
151 if (ret < 0) 151 if (ret < 0)
152 return ret; 152 return ret;
153 153
154 val = !!val;
155
154 ret = tpo_td043_write_mirror(tpo_td043->spi, tpo_td043->hmirror, val); 156 ret = tpo_td043_write_mirror(tpo_td043->spi, tpo_td043->hmirror, val);
155 if (ret < 0) 157 if (ret < 0)
156 return ret; 158 return ret;
@@ -175,7 +177,7 @@ static ssize_t tpo_td043_mode_store(struct device *dev,
175 long val; 177 long val;
176 int ret; 178 int ret;
177 179
178 ret = strict_strtol(buf, 0, &val); 180 ret = kstrtol(buf, 0, &val);
179 if (ret != 0 || val & ~7) 181 if (ret != 0 || val & ~7)
180 return -EINVAL; 182 return -EINVAL;
181 183
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index bfc5da0e9700..6b3e2da11419 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -80,7 +80,7 @@ config OMAP2_DSS_SDI
80 80
81config OMAP2_DSS_DSI 81config OMAP2_DSS_DSI
82 bool "DSI support" 82 bool "DSI support"
83 depends on ARCH_OMAP3 83 depends on ARCH_OMAP3 || ARCH_OMAP4
84 default n 84 default n
85 help 85 help
86 MIPI DSI (Display Serial Interface) support. 86 MIPI DSI (Display Serial Interface) support.
@@ -90,14 +90,6 @@ config OMAP2_DSS_DSI
90 90
91 See http://www.mipi.org/ for DSI spesifications. 91 See http://www.mipi.org/ for DSI spesifications.
92 92
93config OMAP2_DSS_USE_DSI_PLL
94 bool "Use DSI PLL for PCLK (EXPERIMENTAL)"
95 default n
96 depends on OMAP2_DSS_DSI
97 help
98 Use DSI PLL to generate pixel clock. Currently only for DPI output.
99 DSI PLL can be used to generate higher and more precise pixel clocks.
100
101config OMAP2_DSS_FAKE_VSYNC 93config OMAP2_DSS_FAKE_VSYNC
102 bool "Fake VSYNC irq from manual update displays" 94 bool "Fake VSYNC irq from manual update displays"
103 default n 95 default n
@@ -125,4 +117,27 @@ config OMAP2_DSS_MIN_FCK_PER_PCK
125 Max FCK is 173MHz, so this doesn't work if your PCK 117 Max FCK is 173MHz, so this doesn't work if your PCK
126 is very high. 118 is very high.
127 119
120config OMAP2_DSS_SLEEP_BEFORE_RESET
121 bool "Sleep 50ms before DSS reset"
122 default y
123 help
124 For some unknown reason we may get SYNC_LOST errors from the display
125 subsystem at initialization time if we don't sleep before resetting
126 the DSS. See the source (dss.c) for more comments.
127
128 However, 50ms is quite long time to sleep, and with some
129 configurations the SYNC_LOST may never happen, so the sleep can
130 be disabled here.
131
132config OMAP2_DSS_SLEEP_AFTER_VENC_RESET
133 bool "Sleep 20ms after VENC reset"
134 default y
135 help
136 There is a 20ms sleep after VENC reset which seemed to fix the
137 reset. The reason for the bug is unclear, and it's also unclear
138 on what platforms this happens.
139
140 This option enables the sleep, and is enabled by default. You can
141 disable the sleep if it doesn't cause problems on your platform.
142
128endif 143endif
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 1aa2ed1e786e..3da426719dd6 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -33,7 +33,7 @@
33#include <linux/device.h> 33#include <linux/device.h>
34#include <linux/regulator/consumer.h> 34#include <linux/regulator/consumer.h>
35 35
36#include <plat/display.h> 36#include <video/omapdss.h>
37 37
38#include "dss.h" 38#include "dss.h"
39#include "dss_features.h" 39#include "dss_features.h"
@@ -54,6 +54,9 @@ unsigned int dss_debug;
54module_param_named(debug, dss_debug, bool, 0644); 54module_param_named(debug, dss_debug, bool, 0644);
55#endif 55#endif
56 56
57static int omap_dss_register_device(struct omap_dss_device *);
58static void omap_dss_unregister_device(struct omap_dss_device *);
59
57/* REGULATORS */ 60/* REGULATORS */
58 61
59struct regulator *dss_get_vdds_dsi(void) 62struct regulator *dss_get_vdds_dsi(void)
@@ -124,8 +127,7 @@ static int dss_initialize_debugfs(void)
124#endif 127#endif
125 128
126#if defined(CONFIG_OMAP2_DSS_DSI) && defined(CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS) 129#if defined(CONFIG_OMAP2_DSS_DSI) && defined(CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS)
127 debugfs_create_file("dsi_irq", S_IRUGO, dss_debugfs_dir, 130 dsi_create_debugfs_files_irq(dss_debugfs_dir, &dss_debug_fops);
128 &dsi_dump_irqs, &dss_debug_fops);
129#endif 131#endif
130 132
131 debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir, 133 debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir,
@@ -137,8 +139,7 @@ static int dss_initialize_debugfs(void)
137 &rfbi_dump_regs, &dss_debug_fops); 139 &rfbi_dump_regs, &dss_debug_fops);
138#endif 140#endif
139#ifdef CONFIG_OMAP2_DSS_DSI 141#ifdef CONFIG_OMAP2_DSS_DSI
140 debugfs_create_file("dsi", S_IRUGO, dss_debugfs_dir, 142 dsi_create_debugfs_files_reg(dss_debugfs_dir, &dss_debug_fops);
141 &dsi_dump_regs, &dss_debug_fops);
142#endif 143#endif
143#ifdef CONFIG_OMAP2_DSS_VENC 144#ifdef CONFIG_OMAP2_DSS_VENC
144 debugfs_create_file("venc", S_IRUGO, dss_debugfs_dir, 145 debugfs_create_file("venc", S_IRUGO, dss_debugfs_dir,
@@ -480,7 +481,7 @@ static void omap_dss_dev_release(struct device *dev)
480 reset_device(dev, 0); 481 reset_device(dev, 0);
481} 482}
482 483
483int omap_dss_register_device(struct omap_dss_device *dssdev) 484static int omap_dss_register_device(struct omap_dss_device *dssdev)
484{ 485{
485 static int dev_num; 486 static int dev_num;
486 487
@@ -494,7 +495,7 @@ int omap_dss_register_device(struct omap_dss_device *dssdev)
494 return device_register(&dssdev->dev); 495 return device_register(&dssdev->dev);
495} 496}
496 497
497void omap_dss_unregister_device(struct omap_dss_device *dssdev) 498static void omap_dss_unregister_device(struct omap_dss_device *dssdev)
498{ 499{
499 device_unregister(&dssdev->dev); 500 device_unregister(&dssdev->dev);
500} 501}
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 7804779c9da1..7a9a2e7d9685 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -37,99 +37,15 @@
37#include <plat/sram.h> 37#include <plat/sram.h>
38#include <plat/clock.h> 38#include <plat/clock.h>
39 39
40#include <plat/display.h> 40#include <video/omapdss.h>
41 41
42#include "dss.h" 42#include "dss.h"
43#include "dss_features.h" 43#include "dss_features.h"
44#include "dispc.h"
44 45
45/* DISPC */ 46/* DISPC */
46#define DISPC_SZ_REGS SZ_4K 47#define DISPC_SZ_REGS SZ_4K
47 48
48struct dispc_reg { u16 idx; };
49
50#define DISPC_REG(idx) ((const struct dispc_reg) { idx })
51
52/*
53 * DISPC common registers and
54 * DISPC channel registers , ch = 0 for LCD, ch = 1 for
55 * DIGIT, and ch = 2 for LCD2
56 */
57#define DISPC_REVISION DISPC_REG(0x0000)
58#define DISPC_SYSCONFIG DISPC_REG(0x0010)
59#define DISPC_SYSSTATUS DISPC_REG(0x0014)
60#define DISPC_IRQSTATUS DISPC_REG(0x0018)
61#define DISPC_IRQENABLE DISPC_REG(0x001C)
62#define DISPC_CONTROL DISPC_REG(0x0040)
63#define DISPC_CONTROL2 DISPC_REG(0x0238)
64#define DISPC_CONFIG DISPC_REG(0x0044)
65#define DISPC_CONFIG2 DISPC_REG(0x0620)
66#define DISPC_CAPABLE DISPC_REG(0x0048)
67#define DISPC_DEFAULT_COLOR(ch) DISPC_REG(ch == 0 ? 0x004C : \
68 (ch == 1 ? 0x0050 : 0x03AC))
69#define DISPC_TRANS_COLOR(ch) DISPC_REG(ch == 0 ? 0x0054 : \
70 (ch == 1 ? 0x0058 : 0x03B0))
71#define DISPC_LINE_STATUS DISPC_REG(0x005C)
72#define DISPC_LINE_NUMBER DISPC_REG(0x0060)
73#define DISPC_TIMING_H(ch) DISPC_REG(ch != 2 ? 0x0064 : 0x0400)
74#define DISPC_TIMING_V(ch) DISPC_REG(ch != 2 ? 0x0068 : 0x0404)
75#define DISPC_POL_FREQ(ch) DISPC_REG(ch != 2 ? 0x006C : 0x0408)
76#define DISPC_DIVISORo(ch) DISPC_REG(ch != 2 ? 0x0070 : 0x040C)
77#define DISPC_GLOBAL_ALPHA DISPC_REG(0x0074)
78#define DISPC_SIZE_DIG DISPC_REG(0x0078)
79#define DISPC_SIZE_LCD(ch) DISPC_REG(ch != 2 ? 0x007C : 0x03CC)
80
81/* DISPC GFX plane */
82#define DISPC_GFX_BA0 DISPC_REG(0x0080)
83#define DISPC_GFX_BA1 DISPC_REG(0x0084)
84#define DISPC_GFX_POSITION DISPC_REG(0x0088)
85#define DISPC_GFX_SIZE DISPC_REG(0x008C)
86#define DISPC_GFX_ATTRIBUTES DISPC_REG(0x00A0)
87#define DISPC_GFX_FIFO_THRESHOLD DISPC_REG(0x00A4)
88#define DISPC_GFX_FIFO_SIZE_STATUS DISPC_REG(0x00A8)
89#define DISPC_GFX_ROW_INC DISPC_REG(0x00AC)
90#define DISPC_GFX_PIXEL_INC DISPC_REG(0x00B0)
91#define DISPC_GFX_WINDOW_SKIP DISPC_REG(0x00B4)
92#define DISPC_GFX_TABLE_BA DISPC_REG(0x00B8)
93
94#define DISPC_DATA_CYCLE1(ch) DISPC_REG(ch != 2 ? 0x01D4 : 0x03C0)
95#define DISPC_DATA_CYCLE2(ch) DISPC_REG(ch != 2 ? 0x01D8 : 0x03C4)
96#define DISPC_DATA_CYCLE3(ch) DISPC_REG(ch != 2 ? 0x01DC : 0x03C8)
97#define DISPC_CPR_COEF_R(ch) DISPC_REG(ch != 2 ? 0x0220 : 0x03BC)
98#define DISPC_CPR_COEF_G(ch) DISPC_REG(ch != 2 ? 0x0224 : 0x03B8)
99#define DISPC_CPR_COEF_B(ch) DISPC_REG(ch != 2 ? 0x0228 : 0x03B4)
100
101#define DISPC_GFX_PRELOAD DISPC_REG(0x022C)
102
103/* DISPC Video plane, n = 0 for VID1 and n = 1 for VID2 */
104#define DISPC_VID_REG(n, idx) DISPC_REG(0x00BC + (n)*0x90 + idx)
105
106#define DISPC_VID_BA0(n) DISPC_VID_REG(n, 0x0000)
107#define DISPC_VID_BA1(n) DISPC_VID_REG(n, 0x0004)
108#define DISPC_VID_POSITION(n) DISPC_VID_REG(n, 0x0008)
109#define DISPC_VID_SIZE(n) DISPC_VID_REG(n, 0x000C)
110#define DISPC_VID_ATTRIBUTES(n) DISPC_VID_REG(n, 0x0010)
111#define DISPC_VID_FIFO_THRESHOLD(n) DISPC_VID_REG(n, 0x0014)
112#define DISPC_VID_FIFO_SIZE_STATUS(n) DISPC_VID_REG(n, 0x0018)
113#define DISPC_VID_ROW_INC(n) DISPC_VID_REG(n, 0x001C)
114#define DISPC_VID_PIXEL_INC(n) DISPC_VID_REG(n, 0x0020)
115#define DISPC_VID_FIR(n) DISPC_VID_REG(n, 0x0024)
116#define DISPC_VID_PICTURE_SIZE(n) DISPC_VID_REG(n, 0x0028)
117#define DISPC_VID_ACCU0(n) DISPC_VID_REG(n, 0x002C)
118#define DISPC_VID_ACCU1(n) DISPC_VID_REG(n, 0x0030)
119
120/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
121#define DISPC_VID_FIR_COEF_H(n, i) DISPC_REG(0x00F0 + (n)*0x90 + (i)*0x8)
122/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
123#define DISPC_VID_FIR_COEF_HV(n, i) DISPC_REG(0x00F4 + (n)*0x90 + (i)*0x8)
124/* coef index i = {0, 1, 2, 3, 4} */
125#define DISPC_VID_CONV_COEF(n, i) DISPC_REG(0x0130 + (n)*0x90 + (i)*0x4)
126/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
127#define DISPC_VID_FIR_COEF_V(n, i) DISPC_REG(0x01E0 + (n)*0x20 + (i)*0x4)
128
129#define DISPC_VID_PRELOAD(n) DISPC_REG(0x230 + (n)*0x04)
130
131#define DISPC_DIVISOR DISPC_REG(0x0804)
132
133#define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \ 49#define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \
134 DISPC_IRQ_OCP_ERR | \ 50 DISPC_IRQ_OCP_ERR | \
135 DISPC_IRQ_VID1_FIFO_UNDERFLOW | \ 51 DISPC_IRQ_VID1_FIFO_UNDERFLOW | \
@@ -167,10 +83,6 @@ struct dispc_v_coef {
167#define REG_FLD_MOD(idx, val, start, end) \ 83#define REG_FLD_MOD(idx, val, start, end) \
168 dispc_write_reg(idx, FLD_MOD(dispc_read_reg(idx), val, start, end)) 84 dispc_write_reg(idx, FLD_MOD(dispc_read_reg(idx), val, start, end))
169 85
170static const struct dispc_reg dispc_reg_att[] = { DISPC_GFX_ATTRIBUTES,
171 DISPC_VID_ATTRIBUTES(0),
172 DISPC_VID_ATTRIBUTES(1) };
173
174struct dispc_irq_stats { 86struct dispc_irq_stats {
175 unsigned long last_reset; 87 unsigned long last_reset;
176 unsigned irq_count; 88 unsigned irq_count;
@@ -198,25 +110,38 @@ static struct {
198#endif 110#endif
199} dispc; 111} dispc;
200 112
113enum omap_color_component {
114 /* used for all color formats for OMAP3 and earlier
115 * and for RGB and Y color component on OMAP4
116 */
117 DISPC_COLOR_COMPONENT_RGB_Y = 1 << 0,
118 /* used for UV component for
119 * OMAP_DSS_COLOR_YUV2, OMAP_DSS_COLOR_UYVY, OMAP_DSS_COLOR_NV12
120 * color formats on OMAP4
121 */
122 DISPC_COLOR_COMPONENT_UV = 1 << 1,
123};
124
201static void _omap_dispc_set_irqs(void); 125static void _omap_dispc_set_irqs(void);
202 126
203static inline void dispc_write_reg(const struct dispc_reg idx, u32 val) 127static inline void dispc_write_reg(const u16 idx, u32 val)
204{ 128{
205 __raw_writel(val, dispc.base + idx.idx); 129 __raw_writel(val, dispc.base + idx);
206} 130}
207 131
208static inline u32 dispc_read_reg(const struct dispc_reg idx) 132static inline u32 dispc_read_reg(const u16 idx)
209{ 133{
210 return __raw_readl(dispc.base + idx.idx); 134 return __raw_readl(dispc.base + idx);
211} 135}
212 136
213#define SR(reg) \ 137#define SR(reg) \
214 dispc.ctx[(DISPC_##reg).idx / sizeof(u32)] = dispc_read_reg(DISPC_##reg) 138 dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
215#define RR(reg) \ 139#define RR(reg) \
216 dispc_write_reg(DISPC_##reg, dispc.ctx[(DISPC_##reg).idx / sizeof(u32)]) 140 dispc_write_reg(DISPC_##reg, dispc.ctx[DISPC_##reg / sizeof(u32)])
217 141
218void dispc_save_context(void) 142void dispc_save_context(void)
219{ 143{
144 int i;
220 if (cpu_is_omap24xx()) 145 if (cpu_is_omap24xx())
221 return; 146 return;
222 147
@@ -224,157 +149,153 @@ void dispc_save_context(void)
224 SR(IRQENABLE); 149 SR(IRQENABLE);
225 SR(CONTROL); 150 SR(CONTROL);
226 SR(CONFIG); 151 SR(CONFIG);
227 SR(DEFAULT_COLOR(0)); 152 SR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD));
228 SR(DEFAULT_COLOR(1)); 153 SR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_DIGIT));
229 SR(TRANS_COLOR(0)); 154 SR(TRANS_COLOR(OMAP_DSS_CHANNEL_LCD));
230 SR(TRANS_COLOR(1)); 155 SR(TRANS_COLOR(OMAP_DSS_CHANNEL_DIGIT));
231 SR(LINE_NUMBER); 156 SR(LINE_NUMBER);
232 SR(TIMING_H(0)); 157 SR(TIMING_H(OMAP_DSS_CHANNEL_LCD));
233 SR(TIMING_V(0)); 158 SR(TIMING_V(OMAP_DSS_CHANNEL_LCD));
234 SR(POL_FREQ(0)); 159 SR(POL_FREQ(OMAP_DSS_CHANNEL_LCD));
235 SR(DIVISORo(0)); 160 SR(DIVISORo(OMAP_DSS_CHANNEL_LCD));
236 SR(GLOBAL_ALPHA); 161 SR(GLOBAL_ALPHA);
237 SR(SIZE_DIG); 162 SR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
238 SR(SIZE_LCD(0)); 163 SR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
239 if (dss_has_feature(FEAT_MGR_LCD2)) { 164 if (dss_has_feature(FEAT_MGR_LCD2)) {
240 SR(CONTROL2); 165 SR(CONTROL2);
241 SR(DEFAULT_COLOR(2)); 166 SR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD2));
242 SR(TRANS_COLOR(2)); 167 SR(TRANS_COLOR(OMAP_DSS_CHANNEL_LCD2));
243 SR(SIZE_LCD(2)); 168 SR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD2));
244 SR(TIMING_H(2)); 169 SR(TIMING_H(OMAP_DSS_CHANNEL_LCD2));
245 SR(TIMING_V(2)); 170 SR(TIMING_V(OMAP_DSS_CHANNEL_LCD2));
246 SR(POL_FREQ(2)); 171 SR(POL_FREQ(OMAP_DSS_CHANNEL_LCD2));
247 SR(DIVISORo(2)); 172 SR(DIVISORo(OMAP_DSS_CHANNEL_LCD2));
248 SR(CONFIG2); 173 SR(CONFIG2);
249 } 174 }
250 175
251 SR(GFX_BA0); 176 SR(OVL_BA0(OMAP_DSS_GFX));
252 SR(GFX_BA1); 177 SR(OVL_BA1(OMAP_DSS_GFX));
253 SR(GFX_POSITION); 178 SR(OVL_POSITION(OMAP_DSS_GFX));
254 SR(GFX_SIZE); 179 SR(OVL_SIZE(OMAP_DSS_GFX));
255 SR(GFX_ATTRIBUTES); 180 SR(OVL_ATTRIBUTES(OMAP_DSS_GFX));
256 SR(GFX_FIFO_THRESHOLD); 181 SR(OVL_FIFO_THRESHOLD(OMAP_DSS_GFX));
257 SR(GFX_ROW_INC); 182 SR(OVL_ROW_INC(OMAP_DSS_GFX));
258 SR(GFX_PIXEL_INC); 183 SR(OVL_PIXEL_INC(OMAP_DSS_GFX));
259 SR(GFX_WINDOW_SKIP); 184 SR(OVL_WINDOW_SKIP(OMAP_DSS_GFX));
260 SR(GFX_TABLE_BA); 185 SR(OVL_TABLE_BA(OMAP_DSS_GFX));
261 186
262 SR(DATA_CYCLE1(0)); 187 SR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD));
263 SR(DATA_CYCLE2(0)); 188 SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
264 SR(DATA_CYCLE3(0)); 189 SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
265 190
266 SR(CPR_COEF_R(0)); 191 SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
267 SR(CPR_COEF_G(0)); 192 SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
268 SR(CPR_COEF_B(0)); 193 SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
269 if (dss_has_feature(FEAT_MGR_LCD2)) { 194 if (dss_has_feature(FEAT_MGR_LCD2)) {
270 SR(CPR_COEF_B(2)); 195 SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
271 SR(CPR_COEF_G(2)); 196 SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
272 SR(CPR_COEF_R(2)); 197 SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
273 198
274 SR(DATA_CYCLE1(2)); 199 SR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
275 SR(DATA_CYCLE2(2)); 200 SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
276 SR(DATA_CYCLE3(2)); 201 SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
277 } 202 }
278 203
279 SR(GFX_PRELOAD); 204 SR(OVL_PRELOAD(OMAP_DSS_GFX));
280 205
281 /* VID1 */ 206 /* VID1 */
282 SR(VID_BA0(0)); 207 SR(OVL_BA0(OMAP_DSS_VIDEO1));
283 SR(VID_BA1(0)); 208 SR(OVL_BA1(OMAP_DSS_VIDEO1));
284 SR(VID_POSITION(0)); 209 SR(OVL_POSITION(OMAP_DSS_VIDEO1));
285 SR(VID_SIZE(0)); 210 SR(OVL_SIZE(OMAP_DSS_VIDEO1));
286 SR(VID_ATTRIBUTES(0)); 211 SR(OVL_ATTRIBUTES(OMAP_DSS_VIDEO1));
287 SR(VID_FIFO_THRESHOLD(0)); 212 SR(OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO1));
288 SR(VID_ROW_INC(0)); 213 SR(OVL_ROW_INC(OMAP_DSS_VIDEO1));
289 SR(VID_PIXEL_INC(0)); 214 SR(OVL_PIXEL_INC(OMAP_DSS_VIDEO1));
290 SR(VID_FIR(0)); 215 SR(OVL_FIR(OMAP_DSS_VIDEO1));
291 SR(VID_PICTURE_SIZE(0)); 216 SR(OVL_PICTURE_SIZE(OMAP_DSS_VIDEO1));
292 SR(VID_ACCU0(0)); 217 SR(OVL_ACCU0(OMAP_DSS_VIDEO1));
293 SR(VID_ACCU1(0)); 218 SR(OVL_ACCU1(OMAP_DSS_VIDEO1));
294 219
295 SR(VID_FIR_COEF_H(0, 0)); 220 for (i = 0; i < 8; i++)
296 SR(VID_FIR_COEF_H(0, 1)); 221 SR(OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, i));
297 SR(VID_FIR_COEF_H(0, 2)); 222
298 SR(VID_FIR_COEF_H(0, 3)); 223 for (i = 0; i < 8; i++)
299 SR(VID_FIR_COEF_H(0, 4)); 224 SR(OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, i));
300 SR(VID_FIR_COEF_H(0, 5)); 225
301 SR(VID_FIR_COEF_H(0, 6)); 226 for (i = 0; i < 5; i++)
302 SR(VID_FIR_COEF_H(0, 7)); 227 SR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i));
303 228
304 SR(VID_FIR_COEF_HV(0, 0)); 229 for (i = 0; i < 8; i++)
305 SR(VID_FIR_COEF_HV(0, 1)); 230 SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
306 SR(VID_FIR_COEF_HV(0, 2)); 231
307 SR(VID_FIR_COEF_HV(0, 3)); 232 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
308 SR(VID_FIR_COEF_HV(0, 4)); 233 SR(OVL_BA0_UV(OMAP_DSS_VIDEO1));
309 SR(VID_FIR_COEF_HV(0, 5)); 234 SR(OVL_BA1_UV(OMAP_DSS_VIDEO1));
310 SR(VID_FIR_COEF_HV(0, 6)); 235 SR(OVL_FIR2(OMAP_DSS_VIDEO1));
311 SR(VID_FIR_COEF_HV(0, 7)); 236 SR(OVL_ACCU2_0(OMAP_DSS_VIDEO1));
312 237 SR(OVL_ACCU2_1(OMAP_DSS_VIDEO1));
313 SR(VID_CONV_COEF(0, 0)); 238
314 SR(VID_CONV_COEF(0, 1)); 239 for (i = 0; i < 8; i++)
315 SR(VID_CONV_COEF(0, 2)); 240 SR(OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, i));
316 SR(VID_CONV_COEF(0, 3)); 241
317 SR(VID_CONV_COEF(0, 4)); 242 for (i = 0; i < 8; i++)
318 243 SR(OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, i));
319 SR(VID_FIR_COEF_V(0, 0)); 244
320 SR(VID_FIR_COEF_V(0, 1)); 245 for (i = 0; i < 8; i++)
321 SR(VID_FIR_COEF_V(0, 2)); 246 SR(OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, i));
322 SR(VID_FIR_COEF_V(0, 3)); 247 }
323 SR(VID_FIR_COEF_V(0, 4)); 248 if (dss_has_feature(FEAT_ATTR2))
324 SR(VID_FIR_COEF_V(0, 5)); 249 SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1));
325 SR(VID_FIR_COEF_V(0, 6)); 250
326 SR(VID_FIR_COEF_V(0, 7)); 251 SR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
327
328 SR(VID_PRELOAD(0));
329 252
330 /* VID2 */ 253 /* VID2 */
331 SR(VID_BA0(1)); 254 SR(OVL_BA0(OMAP_DSS_VIDEO2));
332 SR(VID_BA1(1)); 255 SR(OVL_BA1(OMAP_DSS_VIDEO2));
333 SR(VID_POSITION(1)); 256 SR(OVL_POSITION(OMAP_DSS_VIDEO2));
334 SR(VID_SIZE(1)); 257 SR(OVL_SIZE(OMAP_DSS_VIDEO2));
335 SR(VID_ATTRIBUTES(1)); 258 SR(OVL_ATTRIBUTES(OMAP_DSS_VIDEO2));
336 SR(VID_FIFO_THRESHOLD(1)); 259 SR(OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO2));
337 SR(VID_ROW_INC(1)); 260 SR(OVL_ROW_INC(OMAP_DSS_VIDEO2));
338 SR(VID_PIXEL_INC(1)); 261 SR(OVL_PIXEL_INC(OMAP_DSS_VIDEO2));
339 SR(VID_FIR(1)); 262 SR(OVL_FIR(OMAP_DSS_VIDEO2));
340 SR(VID_PICTURE_SIZE(1)); 263 SR(OVL_PICTURE_SIZE(OMAP_DSS_VIDEO2));
341 SR(VID_ACCU0(1)); 264 SR(OVL_ACCU0(OMAP_DSS_VIDEO2));
342 SR(VID_ACCU1(1)); 265 SR(OVL_ACCU1(OMAP_DSS_VIDEO2));
343 266
344 SR(VID_FIR_COEF_H(1, 0)); 267 for (i = 0; i < 8; i++)
345 SR(VID_FIR_COEF_H(1, 1)); 268 SR(OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, i));
346 SR(VID_FIR_COEF_H(1, 2)); 269
347 SR(VID_FIR_COEF_H(1, 3)); 270 for (i = 0; i < 8; i++)
348 SR(VID_FIR_COEF_H(1, 4)); 271 SR(OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, i));
349 SR(VID_FIR_COEF_H(1, 5)); 272
350 SR(VID_FIR_COEF_H(1, 6)); 273 for (i = 0; i < 5; i++)
351 SR(VID_FIR_COEF_H(1, 7)); 274 SR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i));
352 275
353 SR(VID_FIR_COEF_HV(1, 0)); 276 for (i = 0; i < 8; i++)
354 SR(VID_FIR_COEF_HV(1, 1)); 277 SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
355 SR(VID_FIR_COEF_HV(1, 2)); 278
356 SR(VID_FIR_COEF_HV(1, 3)); 279 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
357 SR(VID_FIR_COEF_HV(1, 4)); 280 SR(OVL_BA0_UV(OMAP_DSS_VIDEO2));
358 SR(VID_FIR_COEF_HV(1, 5)); 281 SR(OVL_BA1_UV(OMAP_DSS_VIDEO2));
359 SR(VID_FIR_COEF_HV(1, 6)); 282 SR(OVL_FIR2(OMAP_DSS_VIDEO2));
360 SR(VID_FIR_COEF_HV(1, 7)); 283 SR(OVL_ACCU2_0(OMAP_DSS_VIDEO2));
361 284 SR(OVL_ACCU2_1(OMAP_DSS_VIDEO2));
362 SR(VID_CONV_COEF(1, 0)); 285
363 SR(VID_CONV_COEF(1, 1)); 286 for (i = 0; i < 8; i++)
364 SR(VID_CONV_COEF(1, 2)); 287 SR(OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, i));
365 SR(VID_CONV_COEF(1, 3)); 288
366 SR(VID_CONV_COEF(1, 4)); 289 for (i = 0; i < 8; i++)
367 290 SR(OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, i));
368 SR(VID_FIR_COEF_V(1, 0)); 291
369 SR(VID_FIR_COEF_V(1, 1)); 292 for (i = 0; i < 8; i++)
370 SR(VID_FIR_COEF_V(1, 2)); 293 SR(OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, i));
371 SR(VID_FIR_COEF_V(1, 3)); 294 }
372 SR(VID_FIR_COEF_V(1, 4)); 295 if (dss_has_feature(FEAT_ATTR2))
373 SR(VID_FIR_COEF_V(1, 5)); 296 SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
374 SR(VID_FIR_COEF_V(1, 6)); 297
375 SR(VID_FIR_COEF_V(1, 7)); 298 SR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
376
377 SR(VID_PRELOAD(1));
378 299
379 if (dss_has_feature(FEAT_CORE_CLK_DIV)) 300 if (dss_has_feature(FEAT_CORE_CLK_DIV))
380 SR(DIVISOR); 301 SR(DIVISOR);
@@ -382,160 +303,158 @@ void dispc_save_context(void)
382 303
383void dispc_restore_context(void) 304void dispc_restore_context(void)
384{ 305{
306 int i;
385 RR(SYSCONFIG); 307 RR(SYSCONFIG);
386 /*RR(IRQENABLE);*/ 308 /*RR(IRQENABLE);*/
387 /*RR(CONTROL);*/ 309 /*RR(CONTROL);*/
388 RR(CONFIG); 310 RR(CONFIG);
389 RR(DEFAULT_COLOR(0)); 311 RR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD));
390 RR(DEFAULT_COLOR(1)); 312 RR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_DIGIT));
391 RR(TRANS_COLOR(0)); 313 RR(TRANS_COLOR(OMAP_DSS_CHANNEL_LCD));
392 RR(TRANS_COLOR(1)); 314 RR(TRANS_COLOR(OMAP_DSS_CHANNEL_DIGIT));
393 RR(LINE_NUMBER); 315 RR(LINE_NUMBER);
394 RR(TIMING_H(0)); 316 RR(TIMING_H(OMAP_DSS_CHANNEL_LCD));
395 RR(TIMING_V(0)); 317 RR(TIMING_V(OMAP_DSS_CHANNEL_LCD));
396 RR(POL_FREQ(0)); 318 RR(POL_FREQ(OMAP_DSS_CHANNEL_LCD));
397 RR(DIVISORo(0)); 319 RR(DIVISORo(OMAP_DSS_CHANNEL_LCD));
398 RR(GLOBAL_ALPHA); 320 RR(GLOBAL_ALPHA);
399 RR(SIZE_DIG); 321 RR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
400 RR(SIZE_LCD(0)); 322 RR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
401 if (dss_has_feature(FEAT_MGR_LCD2)) { 323 if (dss_has_feature(FEAT_MGR_LCD2)) {
402 RR(DEFAULT_COLOR(2)); 324 RR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD2));
403 RR(TRANS_COLOR(2)); 325 RR(TRANS_COLOR(OMAP_DSS_CHANNEL_LCD2));
404 RR(SIZE_LCD(2)); 326 RR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD2));
405 RR(TIMING_H(2)); 327 RR(TIMING_H(OMAP_DSS_CHANNEL_LCD2));
406 RR(TIMING_V(2)); 328 RR(TIMING_V(OMAP_DSS_CHANNEL_LCD2));
407 RR(POL_FREQ(2)); 329 RR(POL_FREQ(OMAP_DSS_CHANNEL_LCD2));
408 RR(DIVISORo(2)); 330 RR(DIVISORo(OMAP_DSS_CHANNEL_LCD2));
409 RR(CONFIG2); 331 RR(CONFIG2);
410 } 332 }
411 333
412 RR(GFX_BA0); 334 RR(OVL_BA0(OMAP_DSS_GFX));
413 RR(GFX_BA1); 335 RR(OVL_BA1(OMAP_DSS_GFX));
414 RR(GFX_POSITION); 336 RR(OVL_POSITION(OMAP_DSS_GFX));
415 RR(GFX_SIZE); 337 RR(OVL_SIZE(OMAP_DSS_GFX));
416 RR(GFX_ATTRIBUTES); 338 RR(OVL_ATTRIBUTES(OMAP_DSS_GFX));
417 RR(GFX_FIFO_THRESHOLD); 339 RR(OVL_FIFO_THRESHOLD(OMAP_DSS_GFX));
418 RR(GFX_ROW_INC); 340 RR(OVL_ROW_INC(OMAP_DSS_GFX));
419 RR(GFX_PIXEL_INC); 341 RR(OVL_PIXEL_INC(OMAP_DSS_GFX));
420 RR(GFX_WINDOW_SKIP); 342 RR(OVL_WINDOW_SKIP(OMAP_DSS_GFX));
421 RR(GFX_TABLE_BA); 343 RR(OVL_TABLE_BA(OMAP_DSS_GFX));
422 344
423 RR(DATA_CYCLE1(0)); 345
424 RR(DATA_CYCLE2(0)); 346 RR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD));
425 RR(DATA_CYCLE3(0)); 347 RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
426 348 RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
427 RR(CPR_COEF_R(0)); 349
428 RR(CPR_COEF_G(0)); 350 RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
429 RR(CPR_COEF_B(0)); 351 RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
352 RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
430 if (dss_has_feature(FEAT_MGR_LCD2)) { 353 if (dss_has_feature(FEAT_MGR_LCD2)) {
431 RR(DATA_CYCLE1(2)); 354 RR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
432 RR(DATA_CYCLE2(2)); 355 RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
433 RR(DATA_CYCLE3(2)); 356 RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
434 357
435 RR(CPR_COEF_B(2)); 358 RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
436 RR(CPR_COEF_G(2)); 359 RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
437 RR(CPR_COEF_R(2)); 360 RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
438 } 361 }
439 362
440 RR(GFX_PRELOAD); 363 RR(OVL_PRELOAD(OMAP_DSS_GFX));
441 364
442 /* VID1 */ 365 /* VID1 */
443 RR(VID_BA0(0)); 366 RR(OVL_BA0(OMAP_DSS_VIDEO1));
444 RR(VID_BA1(0)); 367 RR(OVL_BA1(OMAP_DSS_VIDEO1));
445 RR(VID_POSITION(0)); 368 RR(OVL_POSITION(OMAP_DSS_VIDEO1));
446 RR(VID_SIZE(0)); 369 RR(OVL_SIZE(OMAP_DSS_VIDEO1));
447 RR(VID_ATTRIBUTES(0)); 370 RR(OVL_ATTRIBUTES(OMAP_DSS_VIDEO1));
448 RR(VID_FIFO_THRESHOLD(0)); 371 RR(OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO1));
449 RR(VID_ROW_INC(0)); 372 RR(OVL_ROW_INC(OMAP_DSS_VIDEO1));
450 RR(VID_PIXEL_INC(0)); 373 RR(OVL_PIXEL_INC(OMAP_DSS_VIDEO1));
451 RR(VID_FIR(0)); 374 RR(OVL_FIR(OMAP_DSS_VIDEO1));
452 RR(VID_PICTURE_SIZE(0)); 375 RR(OVL_PICTURE_SIZE(OMAP_DSS_VIDEO1));
453 RR(VID_ACCU0(0)); 376 RR(OVL_ACCU0(OMAP_DSS_VIDEO1));
454 RR(VID_ACCU1(0)); 377 RR(OVL_ACCU1(OMAP_DSS_VIDEO1));
455 378
456 RR(VID_FIR_COEF_H(0, 0)); 379 for (i = 0; i < 8; i++)
457 RR(VID_FIR_COEF_H(0, 1)); 380 RR(OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, i));
458 RR(VID_FIR_COEF_H(0, 2)); 381
459 RR(VID_FIR_COEF_H(0, 3)); 382 for (i = 0; i < 8; i++)
460 RR(VID_FIR_COEF_H(0, 4)); 383 RR(OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, i));
461 RR(VID_FIR_COEF_H(0, 5)); 384
462 RR(VID_FIR_COEF_H(0, 6)); 385 for (i = 0; i < 5; i++)
463 RR(VID_FIR_COEF_H(0, 7)); 386 RR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i));
464 387
465 RR(VID_FIR_COEF_HV(0, 0)); 388 for (i = 0; i < 8; i++)
466 RR(VID_FIR_COEF_HV(0, 1)); 389 RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
467 RR(VID_FIR_COEF_HV(0, 2)); 390
468 RR(VID_FIR_COEF_HV(0, 3)); 391 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
469 RR(VID_FIR_COEF_HV(0, 4)); 392 RR(OVL_BA0_UV(OMAP_DSS_VIDEO1));
470 RR(VID_FIR_COEF_HV(0, 5)); 393 RR(OVL_BA1_UV(OMAP_DSS_VIDEO1));
471 RR(VID_FIR_COEF_HV(0, 6)); 394 RR(OVL_FIR2(OMAP_DSS_VIDEO1));
472 RR(VID_FIR_COEF_HV(0, 7)); 395 RR(OVL_ACCU2_0(OMAP_DSS_VIDEO1));
473 396 RR(OVL_ACCU2_1(OMAP_DSS_VIDEO1));
474 RR(VID_CONV_COEF(0, 0)); 397
475 RR(VID_CONV_COEF(0, 1)); 398 for (i = 0; i < 8; i++)
476 RR(VID_CONV_COEF(0, 2)); 399 RR(OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, i));
477 RR(VID_CONV_COEF(0, 3)); 400
478 RR(VID_CONV_COEF(0, 4)); 401 for (i = 0; i < 8; i++)
479 402 RR(OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, i));
480 RR(VID_FIR_COEF_V(0, 0)); 403
481 RR(VID_FIR_COEF_V(0, 1)); 404 for (i = 0; i < 8; i++)
482 RR(VID_FIR_COEF_V(0, 2)); 405 RR(OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, i));
483 RR(VID_FIR_COEF_V(0, 3)); 406 }
484 RR(VID_FIR_COEF_V(0, 4)); 407 if (dss_has_feature(FEAT_ATTR2))
485 RR(VID_FIR_COEF_V(0, 5)); 408 RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1));
486 RR(VID_FIR_COEF_V(0, 6)); 409
487 RR(VID_FIR_COEF_V(0, 7)); 410 RR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
488
489 RR(VID_PRELOAD(0));
490 411
491 /* VID2 */ 412 /* VID2 */
492 RR(VID_BA0(1)); 413 RR(OVL_BA0(OMAP_DSS_VIDEO2));
493 RR(VID_BA1(1)); 414 RR(OVL_BA1(OMAP_DSS_VIDEO2));
494 RR(VID_POSITION(1)); 415 RR(OVL_POSITION(OMAP_DSS_VIDEO2));
495 RR(VID_SIZE(1)); 416 RR(OVL_SIZE(OMAP_DSS_VIDEO2));
496 RR(VID_ATTRIBUTES(1)); 417 RR(OVL_ATTRIBUTES(OMAP_DSS_VIDEO2));
497 RR(VID_FIFO_THRESHOLD(1)); 418 RR(OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO2));
498 RR(VID_ROW_INC(1)); 419 RR(OVL_ROW_INC(OMAP_DSS_VIDEO2));
499 RR(VID_PIXEL_INC(1)); 420 RR(OVL_PIXEL_INC(OMAP_DSS_VIDEO2));
500 RR(VID_FIR(1)); 421 RR(OVL_FIR(OMAP_DSS_VIDEO2));
501 RR(VID_PICTURE_SIZE(1)); 422 RR(OVL_PICTURE_SIZE(OMAP_DSS_VIDEO2));
502 RR(VID_ACCU0(1)); 423 RR(OVL_ACCU0(OMAP_DSS_VIDEO2));
503 RR(VID_ACCU1(1)); 424 RR(OVL_ACCU1(OMAP_DSS_VIDEO2));
504 425
505 RR(VID_FIR_COEF_H(1, 0)); 426 for (i = 0; i < 8; i++)
506 RR(VID_FIR_COEF_H(1, 1)); 427 RR(OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, i));
507 RR(VID_FIR_COEF_H(1, 2)); 428
508 RR(VID_FIR_COEF_H(1, 3)); 429 for (i = 0; i < 8; i++)
509 RR(VID_FIR_COEF_H(1, 4)); 430 RR(OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, i));
510 RR(VID_FIR_COEF_H(1, 5)); 431
511 RR(VID_FIR_COEF_H(1, 6)); 432 for (i = 0; i < 5; i++)
512 RR(VID_FIR_COEF_H(1, 7)); 433 RR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i));
513 434
514 RR(VID_FIR_COEF_HV(1, 0)); 435 for (i = 0; i < 8; i++)
515 RR(VID_FIR_COEF_HV(1, 1)); 436 RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
516 RR(VID_FIR_COEF_HV(1, 2)); 437
517 RR(VID_FIR_COEF_HV(1, 3)); 438 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
518 RR(VID_FIR_COEF_HV(1, 4)); 439 RR(OVL_BA0_UV(OMAP_DSS_VIDEO2));
519 RR(VID_FIR_COEF_HV(1, 5)); 440 RR(OVL_BA1_UV(OMAP_DSS_VIDEO2));
520 RR(VID_FIR_COEF_HV(1, 6)); 441 RR(OVL_FIR2(OMAP_DSS_VIDEO2));
521 RR(VID_FIR_COEF_HV(1, 7)); 442 RR(OVL_ACCU2_0(OMAP_DSS_VIDEO2));
522 443 RR(OVL_ACCU2_1(OMAP_DSS_VIDEO2));
523 RR(VID_CONV_COEF(1, 0)); 444
524 RR(VID_CONV_COEF(1, 1)); 445 for (i = 0; i < 8; i++)
525 RR(VID_CONV_COEF(1, 2)); 446 RR(OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, i));
526 RR(VID_CONV_COEF(1, 3)); 447
527 RR(VID_CONV_COEF(1, 4)); 448 for (i = 0; i < 8; i++)
528 449 RR(OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, i));
529 RR(VID_FIR_COEF_V(1, 0)); 450
530 RR(VID_FIR_COEF_V(1, 1)); 451 for (i = 0; i < 8; i++)
531 RR(VID_FIR_COEF_V(1, 2)); 452 RR(OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, i));
532 RR(VID_FIR_COEF_V(1, 3)); 453 }
533 RR(VID_FIR_COEF_V(1, 4)); 454 if (dss_has_feature(FEAT_ATTR2))
534 RR(VID_FIR_COEF_V(1, 5)); 455 RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
535 RR(VID_FIR_COEF_V(1, 6)); 456
536 RR(VID_FIR_COEF_V(1, 7)); 457 RR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
537
538 RR(VID_PRELOAD(1));
539 458
540 if (dss_has_feature(FEAT_CORE_CLK_DIV)) 459 if (dss_has_feature(FEAT_CORE_CLK_DIV))
541 RR(DIVISOR); 460 RR(DIVISOR);
@@ -632,27 +551,43 @@ end:
632 551
633static void _dispc_write_firh_reg(enum omap_plane plane, int reg, u32 value) 552static void _dispc_write_firh_reg(enum omap_plane plane, int reg, u32 value)
634{ 553{
554 dispc_write_reg(DISPC_OVL_FIR_COEF_H(plane, reg), value);
555}
556
557static void _dispc_write_firhv_reg(enum omap_plane plane, int reg, u32 value)
558{
559 dispc_write_reg(DISPC_OVL_FIR_COEF_HV(plane, reg), value);
560}
561
562static void _dispc_write_firv_reg(enum omap_plane plane, int reg, u32 value)
563{
564 dispc_write_reg(DISPC_OVL_FIR_COEF_V(plane, reg), value);
565}
566
567static void _dispc_write_firh2_reg(enum omap_plane plane, int reg, u32 value)
568{
635 BUG_ON(plane == OMAP_DSS_GFX); 569 BUG_ON(plane == OMAP_DSS_GFX);
636 570
637 dispc_write_reg(DISPC_VID_FIR_COEF_H(plane-1, reg), value); 571 dispc_write_reg(DISPC_OVL_FIR_COEF_H2(plane, reg), value);
638} 572}
639 573
640static void _dispc_write_firhv_reg(enum omap_plane plane, int reg, u32 value) 574static void _dispc_write_firhv2_reg(enum omap_plane plane, int reg, u32 value)
641{ 575{
642 BUG_ON(plane == OMAP_DSS_GFX); 576 BUG_ON(plane == OMAP_DSS_GFX);
643 577
644 dispc_write_reg(DISPC_VID_FIR_COEF_HV(plane-1, reg), value); 578 dispc_write_reg(DISPC_OVL_FIR_COEF_HV2(plane, reg), value);
645} 579}
646 580
647static void _dispc_write_firv_reg(enum omap_plane plane, int reg, u32 value) 581static void _dispc_write_firv2_reg(enum omap_plane plane, int reg, u32 value)
648{ 582{
649 BUG_ON(plane == OMAP_DSS_GFX); 583 BUG_ON(plane == OMAP_DSS_GFX);
650 584
651 dispc_write_reg(DISPC_VID_FIR_COEF_V(plane-1, reg), value); 585 dispc_write_reg(DISPC_OVL_FIR_COEF_V2(plane, reg), value);
652} 586}
653 587
654static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup, 588static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup,
655 int vscaleup, int five_taps) 589 int vscaleup, int five_taps,
590 enum omap_color_component color_comp)
656{ 591{
657 /* Coefficients for horizontal up-sampling */ 592 /* Coefficients for horizontal up-sampling */
658 static const struct dispc_h_coef coef_hup[8] = { 593 static const struct dispc_h_coef coef_hup[8] = {
@@ -750,8 +685,14 @@ static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup,
750 | FLD_VAL(v_coef[i].vc1, 23, 16) 685 | FLD_VAL(v_coef[i].vc1, 23, 16)
751 | FLD_VAL(v_coef[i].vc2, 31, 24); 686 | FLD_VAL(v_coef[i].vc2, 31, 24);
752 687
753 _dispc_write_firh_reg(plane, i, h); 688 if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) {
754 _dispc_write_firhv_reg(plane, i, hv); 689 _dispc_write_firh_reg(plane, i, h);
690 _dispc_write_firhv_reg(plane, i, hv);
691 } else {
692 _dispc_write_firh2_reg(plane, i, h);
693 _dispc_write_firhv2_reg(plane, i, hv);
694 }
695
755 } 696 }
756 697
757 if (five_taps) { 698 if (five_taps) {
@@ -759,7 +700,10 @@ static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup,
759 u32 v; 700 u32 v;
760 v = FLD_VAL(v_coef[i].vc00, 7, 0) 701 v = FLD_VAL(v_coef[i].vc00, 7, 0)
761 | FLD_VAL(v_coef[i].vc22, 15, 8); 702 | FLD_VAL(v_coef[i].vc22, 15, 8);
762 _dispc_write_firv_reg(plane, i, v); 703 if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y)
704 _dispc_write_firv_reg(plane, i, v);
705 else
706 _dispc_write_firv2_reg(plane, i, v);
763 } 707 }
764 } 708 }
765} 709}
@@ -779,72 +723,83 @@ static void _dispc_setup_color_conv_coef(void)
779 723
780 ct = &ctbl_bt601_5; 724 ct = &ctbl_bt601_5;
781 725
782 dispc_write_reg(DISPC_VID_CONV_COEF(0, 0), CVAL(ct->rcr, ct->ry)); 726 dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 0),
783 dispc_write_reg(DISPC_VID_CONV_COEF(0, 1), CVAL(ct->gy, ct->rcb)); 727 CVAL(ct->rcr, ct->ry));
784 dispc_write_reg(DISPC_VID_CONV_COEF(0, 2), CVAL(ct->gcb, ct->gcr)); 728 dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 1),
785 dispc_write_reg(DISPC_VID_CONV_COEF(0, 3), CVAL(ct->bcr, ct->by)); 729 CVAL(ct->gy, ct->rcb));
786 dispc_write_reg(DISPC_VID_CONV_COEF(0, 4), CVAL(0, ct->bcb)); 730 dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 2),
787 731 CVAL(ct->gcb, ct->gcr));
788 dispc_write_reg(DISPC_VID_CONV_COEF(1, 0), CVAL(ct->rcr, ct->ry)); 732 dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 3),
789 dispc_write_reg(DISPC_VID_CONV_COEF(1, 1), CVAL(ct->gy, ct->rcb)); 733 CVAL(ct->bcr, ct->by));
790 dispc_write_reg(DISPC_VID_CONV_COEF(1, 2), CVAL(ct->gcb, ct->gcr)); 734 dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 4),
791 dispc_write_reg(DISPC_VID_CONV_COEF(1, 3), CVAL(ct->bcr, ct->by)); 735 CVAL(0, ct->bcb));
792 dispc_write_reg(DISPC_VID_CONV_COEF(1, 4), CVAL(0, ct->bcb)); 736
737 dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 0),
738 CVAL(ct->rcr, ct->ry));
739 dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 1),
740 CVAL(ct->gy, ct->rcb));
741 dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 2),
742 CVAL(ct->gcb, ct->gcr));
743 dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 3),
744 CVAL(ct->bcr, ct->by));
745 dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 4),
746 CVAL(0, ct->bcb));
793 747
794#undef CVAL 748#undef CVAL
795 749
796 REG_FLD_MOD(DISPC_VID_ATTRIBUTES(0), ct->full_range, 11, 11); 750 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(OMAP_DSS_VIDEO1),
797 REG_FLD_MOD(DISPC_VID_ATTRIBUTES(1), ct->full_range, 11, 11); 751 ct->full_range, 11, 11);
752 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(OMAP_DSS_VIDEO2),
753 ct->full_range, 11, 11);
798} 754}
799 755
800 756
801static void _dispc_set_plane_ba0(enum omap_plane plane, u32 paddr) 757static void _dispc_set_plane_ba0(enum omap_plane plane, u32 paddr)
802{ 758{
803 const struct dispc_reg ba0_reg[] = { DISPC_GFX_BA0, 759 dispc_write_reg(DISPC_OVL_BA0(plane), paddr);
804 DISPC_VID_BA0(0),
805 DISPC_VID_BA0(1) };
806
807 dispc_write_reg(ba0_reg[plane], paddr);
808} 760}
809 761
810static void _dispc_set_plane_ba1(enum omap_plane plane, u32 paddr) 762static void _dispc_set_plane_ba1(enum omap_plane plane, u32 paddr)
811{ 763{
812 const struct dispc_reg ba1_reg[] = { DISPC_GFX_BA1, 764 dispc_write_reg(DISPC_OVL_BA1(plane), paddr);
813 DISPC_VID_BA1(0), 765}
814 DISPC_VID_BA1(1) };
815 766
816 dispc_write_reg(ba1_reg[plane], paddr); 767static void _dispc_set_plane_ba0_uv(enum omap_plane plane, u32 paddr)
768{
769 dispc_write_reg(DISPC_OVL_BA0_UV(plane), paddr);
817} 770}
818 771
819static void _dispc_set_plane_pos(enum omap_plane plane, int x, int y) 772static void _dispc_set_plane_ba1_uv(enum omap_plane plane, u32 paddr)
820{ 773{
821 const struct dispc_reg pos_reg[] = { DISPC_GFX_POSITION, 774 dispc_write_reg(DISPC_OVL_BA1_UV(plane), paddr);
822 DISPC_VID_POSITION(0), 775}
823 DISPC_VID_POSITION(1) };
824 776
777static void _dispc_set_plane_pos(enum omap_plane plane, int x, int y)
778{
825 u32 val = FLD_VAL(y, 26, 16) | FLD_VAL(x, 10, 0); 779 u32 val = FLD_VAL(y, 26, 16) | FLD_VAL(x, 10, 0);
826 dispc_write_reg(pos_reg[plane], val); 780
781 dispc_write_reg(DISPC_OVL_POSITION(plane), val);
827} 782}
828 783
829static void _dispc_set_pic_size(enum omap_plane plane, int width, int height) 784static void _dispc_set_pic_size(enum omap_plane plane, int width, int height)
830{ 785{
831 const struct dispc_reg siz_reg[] = { DISPC_GFX_SIZE,
832 DISPC_VID_PICTURE_SIZE(0),
833 DISPC_VID_PICTURE_SIZE(1) };
834 u32 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); 786 u32 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
835 dispc_write_reg(siz_reg[plane], val); 787
788 if (plane == OMAP_DSS_GFX)
789 dispc_write_reg(DISPC_OVL_SIZE(plane), val);
790 else
791 dispc_write_reg(DISPC_OVL_PICTURE_SIZE(plane), val);
836} 792}
837 793
838static void _dispc_set_vid_size(enum omap_plane plane, int width, int height) 794static void _dispc_set_vid_size(enum omap_plane plane, int width, int height)
839{ 795{
840 u32 val; 796 u32 val;
841 const struct dispc_reg vsi_reg[] = { DISPC_VID_SIZE(0),
842 DISPC_VID_SIZE(1) };
843 797
844 BUG_ON(plane == OMAP_DSS_GFX); 798 BUG_ON(plane == OMAP_DSS_GFX);
845 799
846 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); 800 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
847 dispc_write_reg(vsi_reg[plane-1], val); 801
802 dispc_write_reg(DISPC_OVL_SIZE(plane), val);
848} 803}
849 804
850static void _dispc_set_pre_mult_alpha(enum omap_plane plane, bool enable) 805static void _dispc_set_pre_mult_alpha(enum omap_plane plane, bool enable)
@@ -856,7 +811,7 @@ static void _dispc_set_pre_mult_alpha(enum omap_plane plane, bool enable)
856 plane == OMAP_DSS_VIDEO1) 811 plane == OMAP_DSS_VIDEO1)
857 return; 812 return;
858 813
859 REG_FLD_MOD(dispc_reg_att[plane], enable ? 1 : 0, 28, 28); 814 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 28, 28);
860} 815}
861 816
862static void _dispc_setup_global_alpha(enum omap_plane plane, u8 global_alpha) 817static void _dispc_setup_global_alpha(enum omap_plane plane, u8 global_alpha)
@@ -876,61 +831,93 @@ static void _dispc_setup_global_alpha(enum omap_plane plane, u8 global_alpha)
876 831
877static void _dispc_set_pix_inc(enum omap_plane plane, s32 inc) 832static void _dispc_set_pix_inc(enum omap_plane plane, s32 inc)
878{ 833{
879 const struct dispc_reg ri_reg[] = { DISPC_GFX_PIXEL_INC, 834 dispc_write_reg(DISPC_OVL_PIXEL_INC(plane), inc);
880 DISPC_VID_PIXEL_INC(0),
881 DISPC_VID_PIXEL_INC(1) };
882
883 dispc_write_reg(ri_reg[plane], inc);
884} 835}
885 836
886static void _dispc_set_row_inc(enum omap_plane plane, s32 inc) 837static void _dispc_set_row_inc(enum omap_plane plane, s32 inc)
887{ 838{
888 const struct dispc_reg ri_reg[] = { DISPC_GFX_ROW_INC, 839 dispc_write_reg(DISPC_OVL_ROW_INC(plane), inc);
889 DISPC_VID_ROW_INC(0),
890 DISPC_VID_ROW_INC(1) };
891
892 dispc_write_reg(ri_reg[plane], inc);
893} 840}
894 841
895static void _dispc_set_color_mode(enum omap_plane plane, 842static void _dispc_set_color_mode(enum omap_plane plane,
896 enum omap_color_mode color_mode) 843 enum omap_color_mode color_mode)
897{ 844{
898 u32 m = 0; 845 u32 m = 0;
899 846 if (plane != OMAP_DSS_GFX) {
900 switch (color_mode) { 847 switch (color_mode) {
901 case OMAP_DSS_COLOR_CLUT1: 848 case OMAP_DSS_COLOR_NV12:
902 m = 0x0; break; 849 m = 0x0; break;
903 case OMAP_DSS_COLOR_CLUT2: 850 case OMAP_DSS_COLOR_RGB12U:
904 m = 0x1; break; 851 m = 0x1; break;
905 case OMAP_DSS_COLOR_CLUT4: 852 case OMAP_DSS_COLOR_RGBA16:
906 m = 0x2; break; 853 m = 0x2; break;
907 case OMAP_DSS_COLOR_CLUT8: 854 case OMAP_DSS_COLOR_RGBX16:
908 m = 0x3; break; 855 m = 0x4; break;
909 case OMAP_DSS_COLOR_RGB12U: 856 case OMAP_DSS_COLOR_ARGB16:
910 m = 0x4; break; 857 m = 0x5; break;
911 case OMAP_DSS_COLOR_ARGB16: 858 case OMAP_DSS_COLOR_RGB16:
912 m = 0x5; break; 859 m = 0x6; break;
913 case OMAP_DSS_COLOR_RGB16: 860 case OMAP_DSS_COLOR_ARGB16_1555:
914 m = 0x6; break; 861 m = 0x7; break;
915 case OMAP_DSS_COLOR_RGB24U: 862 case OMAP_DSS_COLOR_RGB24U:
916 m = 0x8; break; 863 m = 0x8; break;
917 case OMAP_DSS_COLOR_RGB24P: 864 case OMAP_DSS_COLOR_RGB24P:
918 m = 0x9; break; 865 m = 0x9; break;
919 case OMAP_DSS_COLOR_YUV2: 866 case OMAP_DSS_COLOR_YUV2:
920 m = 0xa; break; 867 m = 0xa; break;
921 case OMAP_DSS_COLOR_UYVY: 868 case OMAP_DSS_COLOR_UYVY:
922 m = 0xb; break; 869 m = 0xb; break;
923 case OMAP_DSS_COLOR_ARGB32: 870 case OMAP_DSS_COLOR_ARGB32:
924 m = 0xc; break; 871 m = 0xc; break;
925 case OMAP_DSS_COLOR_RGBA32: 872 case OMAP_DSS_COLOR_RGBA32:
926 m = 0xd; break; 873 m = 0xd; break;
927 case OMAP_DSS_COLOR_RGBX32: 874 case OMAP_DSS_COLOR_RGBX32:
928 m = 0xe; break; 875 m = 0xe; break;
929 default: 876 case OMAP_DSS_COLOR_XRGB16_1555:
930 BUG(); break; 877 m = 0xf; break;
878 default:
879 BUG(); break;
880 }
881 } else {
882 switch (color_mode) {
883 case OMAP_DSS_COLOR_CLUT1:
884 m = 0x0; break;
885 case OMAP_DSS_COLOR_CLUT2:
886 m = 0x1; break;
887 case OMAP_DSS_COLOR_CLUT4:
888 m = 0x2; break;
889 case OMAP_DSS_COLOR_CLUT8:
890 m = 0x3; break;
891 case OMAP_DSS_COLOR_RGB12U:
892 m = 0x4; break;
893 case OMAP_DSS_COLOR_ARGB16:
894 m = 0x5; break;
895 case OMAP_DSS_COLOR_RGB16:
896 m = 0x6; break;
897 case OMAP_DSS_COLOR_ARGB16_1555:
898 m = 0x7; break;
899 case OMAP_DSS_COLOR_RGB24U:
900 m = 0x8; break;
901 case OMAP_DSS_COLOR_RGB24P:
902 m = 0x9; break;
903 case OMAP_DSS_COLOR_YUV2:
904 m = 0xa; break;
905 case OMAP_DSS_COLOR_UYVY:
906 m = 0xb; break;
907 case OMAP_DSS_COLOR_ARGB32:
908 m = 0xc; break;
909 case OMAP_DSS_COLOR_RGBA32:
910 m = 0xd; break;
911 case OMAP_DSS_COLOR_RGBX32:
912 m = 0xe; break;
913 case OMAP_DSS_COLOR_XRGB16_1555:
914 m = 0xf; break;
915 default:
916 BUG(); break;
917 }
931 } 918 }
932 919
933 REG_FLD_MOD(dispc_reg_att[plane], m, 4, 1); 920 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
934} 921}
935 922
936static void _dispc_set_channel_out(enum omap_plane plane, 923static void _dispc_set_channel_out(enum omap_plane plane,
@@ -953,7 +940,7 @@ static void _dispc_set_channel_out(enum omap_plane plane,
953 return; 940 return;
954 } 941 }
955 942
956 val = dispc_read_reg(dispc_reg_att[plane]); 943 val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
957 if (dss_has_feature(FEAT_MGR_LCD2)) { 944 if (dss_has_feature(FEAT_MGR_LCD2)) {
958 switch (channel) { 945 switch (channel) {
959 case OMAP_DSS_CHANNEL_LCD: 946 case OMAP_DSS_CHANNEL_LCD:
@@ -977,7 +964,7 @@ static void _dispc_set_channel_out(enum omap_plane plane,
977 } else { 964 } else {
978 val = FLD_MOD(val, channel, shift, shift); 965 val = FLD_MOD(val, channel, shift, shift);
979 } 966 }
980 dispc_write_reg(dispc_reg_att[plane], val); 967 dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
981} 968}
982 969
983void dispc_set_burst_size(enum omap_plane plane, 970void dispc_set_burst_size(enum omap_plane plane,
@@ -1001,9 +988,9 @@ void dispc_set_burst_size(enum omap_plane plane,
1001 return; 988 return;
1002 } 989 }
1003 990
1004 val = dispc_read_reg(dispc_reg_att[plane]); 991 val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
1005 val = FLD_MOD(val, burst_size, shift+1, shift); 992 val = FLD_MOD(val, burst_size, shift+1, shift);
1006 dispc_write_reg(dispc_reg_att[plane], val); 993 dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
1007 994
1008 enable_clocks(0); 995 enable_clocks(0);
1009} 996}
@@ -1028,9 +1015,9 @@ static void _dispc_set_vid_color_conv(enum omap_plane plane, bool enable)
1028 1015
1029 BUG_ON(plane == OMAP_DSS_GFX); 1016 BUG_ON(plane == OMAP_DSS_GFX);
1030 1017
1031 val = dispc_read_reg(dispc_reg_att[plane]); 1018 val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
1032 val = FLD_MOD(val, enable, 9, 9); 1019 val = FLD_MOD(val, enable, 9, 9);
1033 dispc_write_reg(dispc_reg_att[plane], val); 1020 dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
1034} 1021}
1035 1022
1036void dispc_enable_replication(enum omap_plane plane, bool enable) 1023void dispc_enable_replication(enum omap_plane plane, bool enable)
@@ -1043,7 +1030,7 @@ void dispc_enable_replication(enum omap_plane plane, bool enable)
1043 bit = 10; 1030 bit = 10;
1044 1031
1045 enable_clocks(1); 1032 enable_clocks(1);
1046 REG_FLD_MOD(dispc_reg_att[plane], enable, bit, bit); 1033 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, bit, bit);
1047 enable_clocks(0); 1034 enable_clocks(0);
1048} 1035}
1049 1036
@@ -1053,7 +1040,7 @@ void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
1053 BUG_ON((width > (1 << 11)) || (height > (1 << 11))); 1040 BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
1054 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); 1041 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
1055 enable_clocks(1); 1042 enable_clocks(1);
1056 dispc_write_reg(DISPC_SIZE_LCD(channel), val); 1043 dispc_write_reg(DISPC_SIZE_MGR(channel), val);
1057 enable_clocks(0); 1044 enable_clocks(0);
1058} 1045}
1059 1046
@@ -1063,15 +1050,12 @@ void dispc_set_digit_size(u16 width, u16 height)
1063 BUG_ON((width > (1 << 11)) || (height > (1 << 11))); 1050 BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
1064 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); 1051 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
1065 enable_clocks(1); 1052 enable_clocks(1);
1066 dispc_write_reg(DISPC_SIZE_DIG, val); 1053 dispc_write_reg(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT), val);
1067 enable_clocks(0); 1054 enable_clocks(0);
1068} 1055}
1069 1056
1070static void dispc_read_plane_fifo_sizes(void) 1057static void dispc_read_plane_fifo_sizes(void)
1071{ 1058{
1072 const struct dispc_reg fsz_reg[] = { DISPC_GFX_FIFO_SIZE_STATUS,
1073 DISPC_VID_FIFO_SIZE_STATUS(0),
1074 DISPC_VID_FIFO_SIZE_STATUS(1) };
1075 u32 size; 1059 u32 size;
1076 int plane; 1060 int plane;
1077 u8 start, end; 1061 u8 start, end;
@@ -1081,7 +1065,8 @@ static void dispc_read_plane_fifo_sizes(void)
1081 dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end); 1065 dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end);
1082 1066
1083 for (plane = 0; plane < ARRAY_SIZE(dispc.fifo_size); ++plane) { 1067 for (plane = 0; plane < ARRAY_SIZE(dispc.fifo_size); ++plane) {
1084 size = FLD_GET(dispc_read_reg(fsz_reg[plane]), start, end); 1068 size = FLD_GET(dispc_read_reg(DISPC_OVL_FIFO_SIZE_STATUS(plane)),
1069 start, end);
1085 dispc.fifo_size[plane] = size; 1070 dispc.fifo_size[plane] = size;
1086 } 1071 }
1087 1072
@@ -1095,23 +1080,22 @@ u32 dispc_get_plane_fifo_size(enum omap_plane plane)
1095 1080
1096void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high) 1081void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high)
1097{ 1082{
1098 const struct dispc_reg ftrs_reg[] = { DISPC_GFX_FIFO_THRESHOLD,
1099 DISPC_VID_FIFO_THRESHOLD(0),
1100 DISPC_VID_FIFO_THRESHOLD(1) };
1101 u8 hi_start, hi_end, lo_start, lo_end; 1083 u8 hi_start, hi_end, lo_start, lo_end;
1102 1084
1085 dss_feat_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end);
1086 dss_feat_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end);
1087
1103 enable_clocks(1); 1088 enable_clocks(1);
1104 1089
1105 DSSDBG("fifo(%d) low/high old %u/%u, new %u/%u\n", 1090 DSSDBG("fifo(%d) low/high old %u/%u, new %u/%u\n",
1106 plane, 1091 plane,
1107 REG_GET(ftrs_reg[plane], 11, 0), 1092 REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane),
1108 REG_GET(ftrs_reg[plane], 27, 16), 1093 lo_start, lo_end),
1094 REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane),
1095 hi_start, hi_end),
1109 low, high); 1096 low, high);
1110 1097
1111 dss_feat_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end); 1098 dispc_write_reg(DISPC_OVL_FIFO_THRESHOLD(plane),
1112 dss_feat_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end);
1113
1114 dispc_write_reg(ftrs_reg[plane],
1115 FLD_VAL(high, hi_start, hi_end) | 1099 FLD_VAL(high, hi_start, hi_end) |
1116 FLD_VAL(low, lo_start, lo_end)); 1100 FLD_VAL(low, lo_start, lo_end));
1117 1101
@@ -1128,106 +1112,120 @@ void dispc_enable_fifomerge(bool enable)
1128 enable_clocks(0); 1112 enable_clocks(0);
1129} 1113}
1130 1114
1131static void _dispc_set_fir(enum omap_plane plane, int hinc, int vinc) 1115static void _dispc_set_fir(enum omap_plane plane,
1116 int hinc, int vinc,
1117 enum omap_color_component color_comp)
1132{ 1118{
1133 u32 val; 1119 u32 val;
1134 const struct dispc_reg fir_reg[] = { DISPC_VID_FIR(0),
1135 DISPC_VID_FIR(1) };
1136 u8 hinc_start, hinc_end, vinc_start, vinc_end;
1137
1138 BUG_ON(plane == OMAP_DSS_GFX);
1139 1120
1140 dss_feat_get_reg_field(FEAT_REG_FIRHINC, &hinc_start, &hinc_end); 1121 if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) {
1141 dss_feat_get_reg_field(FEAT_REG_FIRVINC, &vinc_start, &vinc_end); 1122 u8 hinc_start, hinc_end, vinc_start, vinc_end;
1142 1123
1143 val = FLD_VAL(vinc, vinc_start, vinc_end) | 1124 dss_feat_get_reg_field(FEAT_REG_FIRHINC,
1144 FLD_VAL(hinc, hinc_start, hinc_end); 1125 &hinc_start, &hinc_end);
1126 dss_feat_get_reg_field(FEAT_REG_FIRVINC,
1127 &vinc_start, &vinc_end);
1128 val = FLD_VAL(vinc, vinc_start, vinc_end) |
1129 FLD_VAL(hinc, hinc_start, hinc_end);
1145 1130
1146 dispc_write_reg(fir_reg[plane-1], val); 1131 dispc_write_reg(DISPC_OVL_FIR(plane), val);
1132 } else {
1133 val = FLD_VAL(vinc, 28, 16) | FLD_VAL(hinc, 12, 0);
1134 dispc_write_reg(DISPC_OVL_FIR2(plane), val);
1135 }
1147} 1136}
1148 1137
1149static void _dispc_set_vid_accu0(enum omap_plane plane, int haccu, int vaccu) 1138static void _dispc_set_vid_accu0(enum omap_plane plane, int haccu, int vaccu)
1150{ 1139{
1151 u32 val; 1140 u32 val;
1152 const struct dispc_reg ac0_reg[] = { DISPC_VID_ACCU0(0),
1153 DISPC_VID_ACCU0(1) };
1154 u8 hor_start, hor_end, vert_start, vert_end; 1141 u8 hor_start, hor_end, vert_start, vert_end;
1155 1142
1156 BUG_ON(plane == OMAP_DSS_GFX);
1157
1158 dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end); 1143 dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
1159 dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end); 1144 dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
1160 1145
1161 val = FLD_VAL(vaccu, vert_start, vert_end) | 1146 val = FLD_VAL(vaccu, vert_start, vert_end) |
1162 FLD_VAL(haccu, hor_start, hor_end); 1147 FLD_VAL(haccu, hor_start, hor_end);
1163 1148
1164 dispc_write_reg(ac0_reg[plane-1], val); 1149 dispc_write_reg(DISPC_OVL_ACCU0(plane), val);
1165} 1150}
1166 1151
1167static void _dispc_set_vid_accu1(enum omap_plane plane, int haccu, int vaccu) 1152static void _dispc_set_vid_accu1(enum omap_plane plane, int haccu, int vaccu)
1168{ 1153{
1169 u32 val; 1154 u32 val;
1170 const struct dispc_reg ac1_reg[] = { DISPC_VID_ACCU1(0),
1171 DISPC_VID_ACCU1(1) };
1172 u8 hor_start, hor_end, vert_start, vert_end; 1155 u8 hor_start, hor_end, vert_start, vert_end;
1173 1156
1174 BUG_ON(plane == OMAP_DSS_GFX);
1175
1176 dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end); 1157 dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
1177 dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end); 1158 dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
1178 1159
1179 val = FLD_VAL(vaccu, vert_start, vert_end) | 1160 val = FLD_VAL(vaccu, vert_start, vert_end) |
1180 FLD_VAL(haccu, hor_start, hor_end); 1161 FLD_VAL(haccu, hor_start, hor_end);
1181 1162
1182 dispc_write_reg(ac1_reg[plane-1], val); 1163 dispc_write_reg(DISPC_OVL_ACCU1(plane), val);
1164}
1165
1166static void _dispc_set_vid_accu2_0(enum omap_plane plane, int haccu, int vaccu)
1167{
1168 u32 val;
1169
1170 val = FLD_VAL(vaccu, 26, 16) | FLD_VAL(haccu, 10, 0);
1171 dispc_write_reg(DISPC_OVL_ACCU2_0(plane), val);
1183} 1172}
1184 1173
1174static void _dispc_set_vid_accu2_1(enum omap_plane plane, int haccu, int vaccu)
1175{
1176 u32 val;
1185 1177
1186static void _dispc_set_scaling(enum omap_plane plane, 1178 val = FLD_VAL(vaccu, 26, 16) | FLD_VAL(haccu, 10, 0);
1179 dispc_write_reg(DISPC_OVL_ACCU2_1(plane), val);
1180}
1181
1182static void _dispc_set_scale_param(enum omap_plane plane,
1187 u16 orig_width, u16 orig_height, 1183 u16 orig_width, u16 orig_height,
1188 u16 out_width, u16 out_height, 1184 u16 out_width, u16 out_height,
1189 bool ilace, bool five_taps, 1185 bool five_taps, u8 rotation,
1190 bool fieldmode) 1186 enum omap_color_component color_comp)
1191{ 1187{
1192 int fir_hinc; 1188 int fir_hinc, fir_vinc;
1193 int fir_vinc;
1194 int hscaleup, vscaleup; 1189 int hscaleup, vscaleup;
1195 int accu0 = 0;
1196 int accu1 = 0;
1197 u32 l;
1198
1199 BUG_ON(plane == OMAP_DSS_GFX);
1200 1190
1201 hscaleup = orig_width <= out_width; 1191 hscaleup = orig_width <= out_width;
1202 vscaleup = orig_height <= out_height; 1192 vscaleup = orig_height <= out_height;
1203 1193
1204 _dispc_set_scale_coef(plane, hscaleup, vscaleup, five_taps); 1194 _dispc_set_scale_coef(plane, hscaleup, vscaleup, five_taps, color_comp);
1205 1195
1206 if (!orig_width || orig_width == out_width) 1196 fir_hinc = 1024 * orig_width / out_width;
1207 fir_hinc = 0; 1197 fir_vinc = 1024 * orig_height / out_height;
1208 else
1209 fir_hinc = 1024 * orig_width / out_width;
1210 1198
1211 if (!orig_height || orig_height == out_height) 1199 _dispc_set_fir(plane, fir_hinc, fir_vinc, color_comp);
1212 fir_vinc = 0; 1200}
1213 else
1214 fir_vinc = 1024 * orig_height / out_height;
1215 1201
1216 _dispc_set_fir(plane, fir_hinc, fir_vinc); 1202static void _dispc_set_scaling_common(enum omap_plane plane,
1203 u16 orig_width, u16 orig_height,
1204 u16 out_width, u16 out_height,
1205 bool ilace, bool five_taps,
1206 bool fieldmode, enum omap_color_mode color_mode,
1207 u8 rotation)
1208{
1209 int accu0 = 0;
1210 int accu1 = 0;
1211 u32 l;
1217 1212
1218 l = dispc_read_reg(dispc_reg_att[plane]); 1213 _dispc_set_scale_param(plane, orig_width, orig_height,
1214 out_width, out_height, five_taps,
1215 rotation, DISPC_COLOR_COMPONENT_RGB_Y);
1216 l = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
1219 1217
1220 /* RESIZEENABLE and VERTICALTAPS */ 1218 /* RESIZEENABLE and VERTICALTAPS */
1221 l &= ~((0x3 << 5) | (0x1 << 21)); 1219 l &= ~((0x3 << 5) | (0x1 << 21));
1222 l |= fir_hinc ? (1 << 5) : 0; 1220 l |= (orig_width != out_width) ? (1 << 5) : 0;
1223 l |= fir_vinc ? (1 << 6) : 0; 1221 l |= (orig_height != out_height) ? (1 << 6) : 0;
1224 l |= five_taps ? (1 << 21) : 0; 1222 l |= five_taps ? (1 << 21) : 0;
1225 1223
1226 /* VRESIZECONF and HRESIZECONF */ 1224 /* VRESIZECONF and HRESIZECONF */
1227 if (dss_has_feature(FEAT_RESIZECONF)) { 1225 if (dss_has_feature(FEAT_RESIZECONF)) {
1228 l &= ~(0x3 << 7); 1226 l &= ~(0x3 << 7);
1229 l |= hscaleup ? 0 : (1 << 7); 1227 l |= (orig_width <= out_width) ? 0 : (1 << 7);
1230 l |= vscaleup ? 0 : (1 << 8); 1228 l |= (orig_height <= out_height) ? 0 : (1 << 8);
1231 } 1229 }
1232 1230
1233 /* LINEBUFFERSPLIT */ 1231 /* LINEBUFFERSPLIT */
@@ -1236,7 +1234,7 @@ static void _dispc_set_scaling(enum omap_plane plane,
1236 l |= five_taps ? (1 << 22) : 0; 1234 l |= five_taps ? (1 << 22) : 0;
1237 } 1235 }
1238 1236
1239 dispc_write_reg(dispc_reg_att[plane], l); 1237 dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), l);
1240 1238
1241 /* 1239 /*
1242 * field 0 = even field = bottom field 1240 * field 0 = even field = bottom field
@@ -1244,7 +1242,7 @@ static void _dispc_set_scaling(enum omap_plane plane,
1244 */ 1242 */
1245 if (ilace && !fieldmode) { 1243 if (ilace && !fieldmode) {
1246 accu1 = 0; 1244 accu1 = 0;
1247 accu0 = (fir_vinc / 2) & 0x3ff; 1245 accu0 = ((1024 * orig_height / out_height) / 2) & 0x3ff;
1248 if (accu0 >= 1024/2) { 1246 if (accu0 >= 1024/2) {
1249 accu1 = 1024/2; 1247 accu1 = 1024/2;
1250 accu0 -= accu1; 1248 accu0 -= accu1;
@@ -1255,6 +1253,93 @@ static void _dispc_set_scaling(enum omap_plane plane,
1255 _dispc_set_vid_accu1(plane, 0, accu1); 1253 _dispc_set_vid_accu1(plane, 0, accu1);
1256} 1254}
1257 1255
1256static void _dispc_set_scaling_uv(enum omap_plane plane,
1257 u16 orig_width, u16 orig_height,
1258 u16 out_width, u16 out_height,
1259 bool ilace, bool five_taps,
1260 bool fieldmode, enum omap_color_mode color_mode,
1261 u8 rotation)
1262{
1263 int scale_x = out_width != orig_width;
1264 int scale_y = out_height != orig_height;
1265
1266 if (!dss_has_feature(FEAT_HANDLE_UV_SEPARATE))
1267 return;
1268 if ((color_mode != OMAP_DSS_COLOR_YUV2 &&
1269 color_mode != OMAP_DSS_COLOR_UYVY &&
1270 color_mode != OMAP_DSS_COLOR_NV12)) {
1271 /* reset chroma resampling for RGB formats */
1272 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 8, 8);
1273 return;
1274 }
1275 switch (color_mode) {
1276 case OMAP_DSS_COLOR_NV12:
1277 /* UV is subsampled by 2 vertically*/
1278 orig_height >>= 1;
1279 /* UV is subsampled by 2 horz.*/
1280 orig_width >>= 1;
1281 break;
1282 case OMAP_DSS_COLOR_YUV2:
1283 case OMAP_DSS_COLOR_UYVY:
1284 /*For YUV422 with 90/270 rotation,
1285 *we don't upsample chroma
1286 */
1287 if (rotation == OMAP_DSS_ROT_0 ||
1288 rotation == OMAP_DSS_ROT_180)
1289 /* UV is subsampled by 2 hrz*/
1290 orig_width >>= 1;
1291 /* must use FIR for YUV422 if rotated */
1292 if (rotation != OMAP_DSS_ROT_0)
1293 scale_x = scale_y = true;
1294 break;
1295 default:
1296 BUG();
1297 }
1298
1299 if (out_width != orig_width)
1300 scale_x = true;
1301 if (out_height != orig_height)
1302 scale_y = true;
1303
1304 _dispc_set_scale_param(plane, orig_width, orig_height,
1305 out_width, out_height, five_taps,
1306 rotation, DISPC_COLOR_COMPONENT_UV);
1307
1308 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane),
1309 (scale_x || scale_y) ? 1 : 0, 8, 8);
1310 /* set H scaling */
1311 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5);
1312 /* set V scaling */
1313 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_y ? 1 : 0, 6, 6);
1314
1315 _dispc_set_vid_accu2_0(plane, 0x80, 0);
1316 _dispc_set_vid_accu2_1(plane, 0x80, 0);
1317}
1318
1319static void _dispc_set_scaling(enum omap_plane plane,
1320 u16 orig_width, u16 orig_height,
1321 u16 out_width, u16 out_height,
1322 bool ilace, bool five_taps,
1323 bool fieldmode, enum omap_color_mode color_mode,
1324 u8 rotation)
1325{
1326 BUG_ON(plane == OMAP_DSS_GFX);
1327
1328 _dispc_set_scaling_common(plane,
1329 orig_width, orig_height,
1330 out_width, out_height,
1331 ilace, five_taps,
1332 fieldmode, color_mode,
1333 rotation);
1334
1335 _dispc_set_scaling_uv(plane,
1336 orig_width, orig_height,
1337 out_width, out_height,
1338 ilace, five_taps,
1339 fieldmode, color_mode,
1340 rotation);
1341}
1342
1258static void _dispc_set_rotation_attrs(enum omap_plane plane, u8 rotation, 1343static void _dispc_set_rotation_attrs(enum omap_plane plane, u8 rotation,
1259 bool mirroring, enum omap_color_mode color_mode) 1344 bool mirroring, enum omap_color_mode color_mode)
1260{ 1345{
@@ -1302,9 +1387,10 @@ static void _dispc_set_rotation_attrs(enum omap_plane plane, u8 rotation,
1302 row_repeat = false; 1387 row_repeat = false;
1303 } 1388 }
1304 1389
1305 REG_FLD_MOD(dispc_reg_att[plane], vidrot, 13, 12); 1390 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), vidrot, 13, 12);
1306 if (dss_has_feature(FEAT_ROWREPEATENABLE)) 1391 if (dss_has_feature(FEAT_ROWREPEATENABLE))
1307 REG_FLD_MOD(dispc_reg_att[plane], row_repeat ? 1 : 0, 18, 18); 1392 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane),
1393 row_repeat ? 1 : 0, 18, 18);
1308} 1394}
1309 1395
1310static int color_mode_to_bpp(enum omap_color_mode color_mode) 1396static int color_mode_to_bpp(enum omap_color_mode color_mode)
@@ -1317,12 +1403,17 @@ static int color_mode_to_bpp(enum omap_color_mode color_mode)
1317 case OMAP_DSS_COLOR_CLUT4: 1403 case OMAP_DSS_COLOR_CLUT4:
1318 return 4; 1404 return 4;
1319 case OMAP_DSS_COLOR_CLUT8: 1405 case OMAP_DSS_COLOR_CLUT8:
1406 case OMAP_DSS_COLOR_NV12:
1320 return 8; 1407 return 8;
1321 case OMAP_DSS_COLOR_RGB12U: 1408 case OMAP_DSS_COLOR_RGB12U:
1322 case OMAP_DSS_COLOR_RGB16: 1409 case OMAP_DSS_COLOR_RGB16:
1323 case OMAP_DSS_COLOR_ARGB16: 1410 case OMAP_DSS_COLOR_ARGB16:
1324 case OMAP_DSS_COLOR_YUV2: 1411 case OMAP_DSS_COLOR_YUV2:
1325 case OMAP_DSS_COLOR_UYVY: 1412 case OMAP_DSS_COLOR_UYVY:
1413 case OMAP_DSS_COLOR_RGBA16:
1414 case OMAP_DSS_COLOR_RGBX16:
1415 case OMAP_DSS_COLOR_ARGB16_1555:
1416 case OMAP_DSS_COLOR_XRGB16_1555:
1326 return 16; 1417 return 16;
1327 case OMAP_DSS_COLOR_RGB24P: 1418 case OMAP_DSS_COLOR_RGB24P:
1328 return 24; 1419 return 24;
@@ -1655,7 +1746,7 @@ static int _dispc_setup_plane(enum omap_plane plane,
1655 enum omap_dss_rotation_type rotation_type, 1746 enum omap_dss_rotation_type rotation_type,
1656 u8 rotation, int mirror, 1747 u8 rotation, int mirror,
1657 u8 global_alpha, u8 pre_mult_alpha, 1748 u8 global_alpha, u8 pre_mult_alpha,
1658 enum omap_channel channel) 1749 enum omap_channel channel, u32 puv_addr)
1659{ 1750{
1660 const int maxdownscale = cpu_is_omap34xx() ? 4 : 2; 1751 const int maxdownscale = cpu_is_omap34xx() ? 4 : 2;
1661 bool five_taps = 0; 1752 bool five_taps = 0;
@@ -1704,7 +1795,8 @@ static int _dispc_setup_plane(enum omap_plane plane,
1704 return -EINVAL; 1795 return -EINVAL;
1705 1796
1706 if (color_mode == OMAP_DSS_COLOR_YUV2 || 1797 if (color_mode == OMAP_DSS_COLOR_YUV2 ||
1707 color_mode == OMAP_DSS_COLOR_UYVY) 1798 color_mode == OMAP_DSS_COLOR_UYVY ||
1799 color_mode == OMAP_DSS_COLOR_NV12)
1708 cconv = 1; 1800 cconv = 1;
1709 1801
1710 /* Must use 5-tap filter? */ 1802 /* Must use 5-tap filter? */
@@ -1778,6 +1870,12 @@ static int _dispc_setup_plane(enum omap_plane plane,
1778 _dispc_set_plane_ba0(plane, paddr + offset0); 1870 _dispc_set_plane_ba0(plane, paddr + offset0);
1779 _dispc_set_plane_ba1(plane, paddr + offset1); 1871 _dispc_set_plane_ba1(plane, paddr + offset1);
1780 1872
1873 if (OMAP_DSS_COLOR_NV12 == color_mode) {
1874 _dispc_set_plane_ba0_uv(plane, puv_addr + offset0);
1875 _dispc_set_plane_ba1_uv(plane, puv_addr + offset1);
1876 }
1877
1878
1781 _dispc_set_row_inc(plane, row_inc); 1879 _dispc_set_row_inc(plane, row_inc);
1782 _dispc_set_pix_inc(plane, pix_inc); 1880 _dispc_set_pix_inc(plane, pix_inc);
1783 1881
@@ -1791,7 +1889,8 @@ static int _dispc_setup_plane(enum omap_plane plane,
1791 if (plane != OMAP_DSS_GFX) { 1889 if (plane != OMAP_DSS_GFX) {
1792 _dispc_set_scaling(plane, width, height, 1890 _dispc_set_scaling(plane, width, height,
1793 out_width, out_height, 1891 out_width, out_height,
1794 ilace, five_taps, fieldmode); 1892 ilace, five_taps, fieldmode,
1893 color_mode, rotation);
1795 _dispc_set_vid_size(plane, out_width, out_height); 1894 _dispc_set_vid_size(plane, out_width, out_height);
1796 _dispc_set_vid_color_conv(plane, cconv); 1895 _dispc_set_vid_color_conv(plane, cconv);
1797 } 1896 }
@@ -1806,7 +1905,7 @@ static int _dispc_setup_plane(enum omap_plane plane,
1806 1905
1807static void _dispc_enable_plane(enum omap_plane plane, bool enable) 1906static void _dispc_enable_plane(enum omap_plane plane, bool enable)
1808{ 1907{
1809 REG_FLD_MOD(dispc_reg_att[plane], enable ? 1 : 0, 0, 0); 1908 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 0, 0);
1810} 1909}
1811 1910
1812static void dispc_disable_isr(void *data, u32 mask) 1911static void dispc_disable_isr(void *data, u32 mask)
@@ -2353,14 +2452,20 @@ static void dispc_get_lcd_divisor(enum omap_channel channel, int *lck_div,
2353 2452
2354unsigned long dispc_fclk_rate(void) 2453unsigned long dispc_fclk_rate(void)
2355{ 2454{
2455 struct platform_device *dsidev;
2356 unsigned long r = 0; 2456 unsigned long r = 0;
2357 2457
2358 switch (dss_get_dispc_clk_source()) { 2458 switch (dss_get_dispc_clk_source()) {
2359 case DSS_CLK_SRC_FCK: 2459 case OMAP_DSS_CLK_SRC_FCK:
2360 r = dss_clk_get_rate(DSS_CLK_FCK); 2460 r = dss_clk_get_rate(DSS_CLK_FCK);
2361 break; 2461 break;
2362 case DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: 2462 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
2363 r = dsi_get_pll_hsdiv_dispc_rate(); 2463 dsidev = dsi_get_dsidev_from_id(0);
2464 r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
2465 break;
2466 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
2467 dsidev = dsi_get_dsidev_from_id(1);
2468 r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
2364 break; 2469 break;
2365 default: 2470 default:
2366 BUG(); 2471 BUG();
@@ -2371,6 +2476,7 @@ unsigned long dispc_fclk_rate(void)
2371 2476
2372unsigned long dispc_lclk_rate(enum omap_channel channel) 2477unsigned long dispc_lclk_rate(enum omap_channel channel)
2373{ 2478{
2479 struct platform_device *dsidev;
2374 int lcd; 2480 int lcd;
2375 unsigned long r; 2481 unsigned long r;
2376 u32 l; 2482 u32 l;
@@ -2380,11 +2486,16 @@ unsigned long dispc_lclk_rate(enum omap_channel channel)
2380 lcd = FLD_GET(l, 23, 16); 2486 lcd = FLD_GET(l, 23, 16);
2381 2487
2382 switch (dss_get_lcd_clk_source(channel)) { 2488 switch (dss_get_lcd_clk_source(channel)) {
2383 case DSS_CLK_SRC_FCK: 2489 case OMAP_DSS_CLK_SRC_FCK:
2384 r = dss_clk_get_rate(DSS_CLK_FCK); 2490 r = dss_clk_get_rate(DSS_CLK_FCK);
2385 break; 2491 break;
2386 case DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: 2492 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
2387 r = dsi_get_pll_hsdiv_dispc_rate(); 2493 dsidev = dsi_get_dsidev_from_id(0);
2494 r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
2495 break;
2496 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
2497 dsidev = dsi_get_dsidev_from_id(1);
2498 r = dsi_get_pll_hsdiv_dispc_rate(dsidev);
2388 break; 2499 break;
2389 default: 2500 default:
2390 BUG(); 2501 BUG();
@@ -2412,8 +2523,8 @@ void dispc_dump_clocks(struct seq_file *s)
2412{ 2523{
2413 int lcd, pcd; 2524 int lcd, pcd;
2414 u32 l; 2525 u32 l;
2415 enum dss_clk_source dispc_clk_src = dss_get_dispc_clk_source(); 2526 enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
2416 enum dss_clk_source lcd_clk_src; 2527 enum omap_dss_clk_source lcd_clk_src;
2417 2528
2418 enable_clocks(1); 2529 enable_clocks(1);
2419 2530
@@ -2516,7 +2627,7 @@ void dispc_dump_irqs(struct seq_file *s)
2516 2627
2517void dispc_dump_regs(struct seq_file *s) 2628void dispc_dump_regs(struct seq_file *s)
2518{ 2629{
2519#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dispc_read_reg(r)) 2630#define DUMPREG(r) seq_printf(s, "%-50s %08x\n", #r, dispc_read_reg(r))
2520 2631
2521 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); 2632 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
2522 2633
@@ -2528,152 +2639,227 @@ void dispc_dump_regs(struct seq_file *s)
2528 DUMPREG(DISPC_CONTROL); 2639 DUMPREG(DISPC_CONTROL);
2529 DUMPREG(DISPC_CONFIG); 2640 DUMPREG(DISPC_CONFIG);
2530 DUMPREG(DISPC_CAPABLE); 2641 DUMPREG(DISPC_CAPABLE);
2531 DUMPREG(DISPC_DEFAULT_COLOR(0)); 2642 DUMPREG(DISPC_DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD));
2532 DUMPREG(DISPC_DEFAULT_COLOR(1)); 2643 DUMPREG(DISPC_DEFAULT_COLOR(OMAP_DSS_CHANNEL_DIGIT));
2533 DUMPREG(DISPC_TRANS_COLOR(0)); 2644 DUMPREG(DISPC_TRANS_COLOR(OMAP_DSS_CHANNEL_LCD));
2534 DUMPREG(DISPC_TRANS_COLOR(1)); 2645 DUMPREG(DISPC_TRANS_COLOR(OMAP_DSS_CHANNEL_DIGIT));
2535 DUMPREG(DISPC_LINE_STATUS); 2646 DUMPREG(DISPC_LINE_STATUS);
2536 DUMPREG(DISPC_LINE_NUMBER); 2647 DUMPREG(DISPC_LINE_NUMBER);
2537 DUMPREG(DISPC_TIMING_H(0)); 2648 DUMPREG(DISPC_TIMING_H(OMAP_DSS_CHANNEL_LCD));
2538 DUMPREG(DISPC_TIMING_V(0)); 2649 DUMPREG(DISPC_TIMING_V(OMAP_DSS_CHANNEL_LCD));
2539 DUMPREG(DISPC_POL_FREQ(0)); 2650 DUMPREG(DISPC_POL_FREQ(OMAP_DSS_CHANNEL_LCD));
2540 DUMPREG(DISPC_DIVISORo(0)); 2651 DUMPREG(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD));
2541 DUMPREG(DISPC_GLOBAL_ALPHA); 2652 DUMPREG(DISPC_GLOBAL_ALPHA);
2542 DUMPREG(DISPC_SIZE_DIG); 2653 DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
2543 DUMPREG(DISPC_SIZE_LCD(0)); 2654 DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
2544 if (dss_has_feature(FEAT_MGR_LCD2)) { 2655 if (dss_has_feature(FEAT_MGR_LCD2)) {
2545 DUMPREG(DISPC_CONTROL2); 2656 DUMPREG(DISPC_CONTROL2);
2546 DUMPREG(DISPC_CONFIG2); 2657 DUMPREG(DISPC_CONFIG2);
2547 DUMPREG(DISPC_DEFAULT_COLOR(2)); 2658 DUMPREG(DISPC_DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD2));
2548 DUMPREG(DISPC_TRANS_COLOR(2)); 2659 DUMPREG(DISPC_TRANS_COLOR(OMAP_DSS_CHANNEL_LCD2));
2549 DUMPREG(DISPC_TIMING_H(2)); 2660 DUMPREG(DISPC_TIMING_H(OMAP_DSS_CHANNEL_LCD2));
2550 DUMPREG(DISPC_TIMING_V(2)); 2661 DUMPREG(DISPC_TIMING_V(OMAP_DSS_CHANNEL_LCD2));
2551 DUMPREG(DISPC_POL_FREQ(2)); 2662 DUMPREG(DISPC_POL_FREQ(OMAP_DSS_CHANNEL_LCD2));
2552 DUMPREG(DISPC_DIVISORo(2)); 2663 DUMPREG(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD2));
2553 DUMPREG(DISPC_SIZE_LCD(2)); 2664 DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_LCD2));
2554 } 2665 }
2555 2666
2556 DUMPREG(DISPC_GFX_BA0); 2667 DUMPREG(DISPC_OVL_BA0(OMAP_DSS_GFX));
2557 DUMPREG(DISPC_GFX_BA1); 2668 DUMPREG(DISPC_OVL_BA1(OMAP_DSS_GFX));
2558 DUMPREG(DISPC_GFX_POSITION); 2669 DUMPREG(DISPC_OVL_POSITION(OMAP_DSS_GFX));
2559 DUMPREG(DISPC_GFX_SIZE); 2670 DUMPREG(DISPC_OVL_SIZE(OMAP_DSS_GFX));
2560 DUMPREG(DISPC_GFX_ATTRIBUTES); 2671 DUMPREG(DISPC_OVL_ATTRIBUTES(OMAP_DSS_GFX));
2561 DUMPREG(DISPC_GFX_FIFO_THRESHOLD); 2672 DUMPREG(DISPC_OVL_FIFO_THRESHOLD(OMAP_DSS_GFX));
2562 DUMPREG(DISPC_GFX_FIFO_SIZE_STATUS); 2673 DUMPREG(DISPC_OVL_FIFO_SIZE_STATUS(OMAP_DSS_GFX));
2563 DUMPREG(DISPC_GFX_ROW_INC); 2674 DUMPREG(DISPC_OVL_ROW_INC(OMAP_DSS_GFX));
2564 DUMPREG(DISPC_GFX_PIXEL_INC); 2675 DUMPREG(DISPC_OVL_PIXEL_INC(OMAP_DSS_GFX));
2565 DUMPREG(DISPC_GFX_WINDOW_SKIP); 2676 DUMPREG(DISPC_OVL_WINDOW_SKIP(OMAP_DSS_GFX));
2566 DUMPREG(DISPC_GFX_TABLE_BA); 2677 DUMPREG(DISPC_OVL_TABLE_BA(OMAP_DSS_GFX));
2567 2678
2568 DUMPREG(DISPC_DATA_CYCLE1(0)); 2679 DUMPREG(DISPC_DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD));
2569 DUMPREG(DISPC_DATA_CYCLE2(0)); 2680 DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
2570 DUMPREG(DISPC_DATA_CYCLE3(0)); 2681 DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
2571 2682
2572 DUMPREG(DISPC_CPR_COEF_R(0)); 2683 DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
2573 DUMPREG(DISPC_CPR_COEF_G(0)); 2684 DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
2574 DUMPREG(DISPC_CPR_COEF_B(0)); 2685 DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
2575 if (dss_has_feature(FEAT_MGR_LCD2)) { 2686 if (dss_has_feature(FEAT_MGR_LCD2)) {
2576 DUMPREG(DISPC_DATA_CYCLE1(2)); 2687 DUMPREG(DISPC_DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
2577 DUMPREG(DISPC_DATA_CYCLE2(2)); 2688 DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
2578 DUMPREG(DISPC_DATA_CYCLE3(2)); 2689 DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
2579 2690
2580 DUMPREG(DISPC_CPR_COEF_R(2)); 2691 DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
2581 DUMPREG(DISPC_CPR_COEF_G(2)); 2692 DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
2582 DUMPREG(DISPC_CPR_COEF_B(2)); 2693 DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
2583 } 2694 }
2584 2695
2585 DUMPREG(DISPC_GFX_PRELOAD); 2696 DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_GFX));
2586 2697
2587 DUMPREG(DISPC_VID_BA0(0)); 2698 DUMPREG(DISPC_OVL_BA0(OMAP_DSS_VIDEO1));
2588 DUMPREG(DISPC_VID_BA1(0)); 2699 DUMPREG(DISPC_OVL_BA1(OMAP_DSS_VIDEO1));
2589 DUMPREG(DISPC_VID_POSITION(0)); 2700 DUMPREG(DISPC_OVL_POSITION(OMAP_DSS_VIDEO1));
2590 DUMPREG(DISPC_VID_SIZE(0)); 2701 DUMPREG(DISPC_OVL_SIZE(OMAP_DSS_VIDEO1));
2591 DUMPREG(DISPC_VID_ATTRIBUTES(0)); 2702 DUMPREG(DISPC_OVL_ATTRIBUTES(OMAP_DSS_VIDEO1));
2592 DUMPREG(DISPC_VID_FIFO_THRESHOLD(0)); 2703 DUMPREG(DISPC_OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO1));
2593 DUMPREG(DISPC_VID_FIFO_SIZE_STATUS(0)); 2704 DUMPREG(DISPC_OVL_FIFO_SIZE_STATUS(OMAP_DSS_VIDEO1));
2594 DUMPREG(DISPC_VID_ROW_INC(0)); 2705 DUMPREG(DISPC_OVL_ROW_INC(OMAP_DSS_VIDEO1));
2595 DUMPREG(DISPC_VID_PIXEL_INC(0)); 2706 DUMPREG(DISPC_OVL_PIXEL_INC(OMAP_DSS_VIDEO1));
2596 DUMPREG(DISPC_VID_FIR(0)); 2707 DUMPREG(DISPC_OVL_FIR(OMAP_DSS_VIDEO1));
2597 DUMPREG(DISPC_VID_PICTURE_SIZE(0)); 2708 DUMPREG(DISPC_OVL_PICTURE_SIZE(OMAP_DSS_VIDEO1));
2598 DUMPREG(DISPC_VID_ACCU0(0)); 2709 DUMPREG(DISPC_OVL_ACCU0(OMAP_DSS_VIDEO1));
2599 DUMPREG(DISPC_VID_ACCU1(0)); 2710 DUMPREG(DISPC_OVL_ACCU1(OMAP_DSS_VIDEO1));
2600 2711
2601 DUMPREG(DISPC_VID_BA0(1)); 2712 DUMPREG(DISPC_OVL_BA0(OMAP_DSS_VIDEO2));
2602 DUMPREG(DISPC_VID_BA1(1)); 2713 DUMPREG(DISPC_OVL_BA1(OMAP_DSS_VIDEO2));
2603 DUMPREG(DISPC_VID_POSITION(1)); 2714 DUMPREG(DISPC_OVL_POSITION(OMAP_DSS_VIDEO2));
2604 DUMPREG(DISPC_VID_SIZE(1)); 2715 DUMPREG(DISPC_OVL_SIZE(OMAP_DSS_VIDEO2));
2605 DUMPREG(DISPC_VID_ATTRIBUTES(1)); 2716 DUMPREG(DISPC_OVL_ATTRIBUTES(OMAP_DSS_VIDEO2));
2606 DUMPREG(DISPC_VID_FIFO_THRESHOLD(1)); 2717 DUMPREG(DISPC_OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO2));
2607 DUMPREG(DISPC_VID_FIFO_SIZE_STATUS(1)); 2718 DUMPREG(DISPC_OVL_FIFO_SIZE_STATUS(OMAP_DSS_VIDEO2));
2608 DUMPREG(DISPC_VID_ROW_INC(1)); 2719 DUMPREG(DISPC_OVL_ROW_INC(OMAP_DSS_VIDEO2));
2609 DUMPREG(DISPC_VID_PIXEL_INC(1)); 2720 DUMPREG(DISPC_OVL_PIXEL_INC(OMAP_DSS_VIDEO2));
2610 DUMPREG(DISPC_VID_FIR(1)); 2721 DUMPREG(DISPC_OVL_FIR(OMAP_DSS_VIDEO2));
2611 DUMPREG(DISPC_VID_PICTURE_SIZE(1)); 2722 DUMPREG(DISPC_OVL_PICTURE_SIZE(OMAP_DSS_VIDEO2));
2612 DUMPREG(DISPC_VID_ACCU0(1)); 2723 DUMPREG(DISPC_OVL_ACCU0(OMAP_DSS_VIDEO2));
2613 DUMPREG(DISPC_VID_ACCU1(1)); 2724 DUMPREG(DISPC_OVL_ACCU1(OMAP_DSS_VIDEO2));
2614 2725
2615 DUMPREG(DISPC_VID_FIR_COEF_H(0, 0)); 2726 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 0));
2616 DUMPREG(DISPC_VID_FIR_COEF_H(0, 1)); 2727 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 1));
2617 DUMPREG(DISPC_VID_FIR_COEF_H(0, 2)); 2728 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 2));
2618 DUMPREG(DISPC_VID_FIR_COEF_H(0, 3)); 2729 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 3));
2619 DUMPREG(DISPC_VID_FIR_COEF_H(0, 4)); 2730 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 4));
2620 DUMPREG(DISPC_VID_FIR_COEF_H(0, 5)); 2731 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 5));
2621 DUMPREG(DISPC_VID_FIR_COEF_H(0, 6)); 2732 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 6));
2622 DUMPREG(DISPC_VID_FIR_COEF_H(0, 7)); 2733 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 7));
2623 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 0)); 2734 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 0));
2624 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 1)); 2735 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 1));
2625 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 2)); 2736 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 2));
2626 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 3)); 2737 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 3));
2627 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 4)); 2738 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 4));
2628 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 5)); 2739 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 5));
2629 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 6)); 2740 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 6));
2630 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 7)); 2741 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 7));
2631 DUMPREG(DISPC_VID_CONV_COEF(0, 0)); 2742 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 0));
2632 DUMPREG(DISPC_VID_CONV_COEF(0, 1)); 2743 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 1));
2633 DUMPREG(DISPC_VID_CONV_COEF(0, 2)); 2744 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 2));
2634 DUMPREG(DISPC_VID_CONV_COEF(0, 3)); 2745 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 3));
2635 DUMPREG(DISPC_VID_CONV_COEF(0, 4)); 2746 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 4));
2636 DUMPREG(DISPC_VID_FIR_COEF_V(0, 0)); 2747 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 0));
2637 DUMPREG(DISPC_VID_FIR_COEF_V(0, 1)); 2748 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 1));
2638 DUMPREG(DISPC_VID_FIR_COEF_V(0, 2)); 2749 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 2));
2639 DUMPREG(DISPC_VID_FIR_COEF_V(0, 3)); 2750 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 3));
2640 DUMPREG(DISPC_VID_FIR_COEF_V(0, 4)); 2751 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 4));
2641 DUMPREG(DISPC_VID_FIR_COEF_V(0, 5)); 2752 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 5));
2642 DUMPREG(DISPC_VID_FIR_COEF_V(0, 6)); 2753 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 6));
2643 DUMPREG(DISPC_VID_FIR_COEF_V(0, 7)); 2754 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 7));
2644 2755
2645 DUMPREG(DISPC_VID_FIR_COEF_H(1, 0)); 2756 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
2646 DUMPREG(DISPC_VID_FIR_COEF_H(1, 1)); 2757 DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO1));
2647 DUMPREG(DISPC_VID_FIR_COEF_H(1, 2)); 2758 DUMPREG(DISPC_OVL_BA1_UV(OMAP_DSS_VIDEO1));
2648 DUMPREG(DISPC_VID_FIR_COEF_H(1, 3)); 2759 DUMPREG(DISPC_OVL_FIR2(OMAP_DSS_VIDEO1));
2649 DUMPREG(DISPC_VID_FIR_COEF_H(1, 4)); 2760 DUMPREG(DISPC_OVL_ACCU2_0(OMAP_DSS_VIDEO1));
2650 DUMPREG(DISPC_VID_FIR_COEF_H(1, 5)); 2761 DUMPREG(DISPC_OVL_ACCU2_1(OMAP_DSS_VIDEO1));
2651 DUMPREG(DISPC_VID_FIR_COEF_H(1, 6)); 2762
2652 DUMPREG(DISPC_VID_FIR_COEF_H(1, 7)); 2763 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 0));
2653 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 0)); 2764 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 1));
2654 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 1)); 2765 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 2));
2655 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 2)); 2766 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 3));
2656 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 3)); 2767 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 4));
2657 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 4)); 2768 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 5));
2658 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 5)); 2769 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 6));
2659 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 6)); 2770 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 7));
2660 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 7)); 2771
2661 DUMPREG(DISPC_VID_CONV_COEF(1, 0)); 2772 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 0));
2662 DUMPREG(DISPC_VID_CONV_COEF(1, 1)); 2773 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 1));
2663 DUMPREG(DISPC_VID_CONV_COEF(1, 2)); 2774 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 2));
2664 DUMPREG(DISPC_VID_CONV_COEF(1, 3)); 2775 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 3));
2665 DUMPREG(DISPC_VID_CONV_COEF(1, 4)); 2776 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 4));
2666 DUMPREG(DISPC_VID_FIR_COEF_V(1, 0)); 2777 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 5));
2667 DUMPREG(DISPC_VID_FIR_COEF_V(1, 1)); 2778 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 6));
2668 DUMPREG(DISPC_VID_FIR_COEF_V(1, 2)); 2779 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 7));
2669 DUMPREG(DISPC_VID_FIR_COEF_V(1, 3)); 2780
2670 DUMPREG(DISPC_VID_FIR_COEF_V(1, 4)); 2781 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 0));
2671 DUMPREG(DISPC_VID_FIR_COEF_V(1, 5)); 2782 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 1));
2672 DUMPREG(DISPC_VID_FIR_COEF_V(1, 6)); 2783 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 2));
2673 DUMPREG(DISPC_VID_FIR_COEF_V(1, 7)); 2784 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 3));
2674 2785 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 4));
2675 DUMPREG(DISPC_VID_PRELOAD(0)); 2786 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 5));
2676 DUMPREG(DISPC_VID_PRELOAD(1)); 2787 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 6));
2788 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 7));
2789 }
2790 if (dss_has_feature(FEAT_ATTR2))
2791 DUMPREG(DISPC_OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1));
2792
2793
2794 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 0));
2795 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 1));
2796 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 2));
2797 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 3));
2798 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 4));
2799 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 5));
2800 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 6));
2801 DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 7));
2802 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 0));
2803 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 1));
2804 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 2));
2805 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 3));
2806 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 4));
2807 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 5));
2808 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 6));
2809 DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 7));
2810 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 0));
2811 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 1));
2812 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 2));
2813 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 3));
2814 DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 4));
2815 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 0));
2816 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 1));
2817 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 2));
2818 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 3));
2819 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 4));
2820 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 5));
2821 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 6));
2822 DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 7));
2823
2824 if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
2825 DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO2));
2826 DUMPREG(DISPC_OVL_BA1_UV(OMAP_DSS_VIDEO2));
2827 DUMPREG(DISPC_OVL_FIR2(OMAP_DSS_VIDEO2));
2828 DUMPREG(DISPC_OVL_ACCU2_0(OMAP_DSS_VIDEO2));
2829 DUMPREG(DISPC_OVL_ACCU2_1(OMAP_DSS_VIDEO2));
2830
2831 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 0));
2832 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 1));
2833 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 2));
2834 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 3));
2835 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 4));
2836 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 5));
2837 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 6));
2838 DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 7));
2839
2840 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 0));
2841 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 1));
2842 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 2));
2843 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 3));
2844 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 4));
2845 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 5));
2846 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 6));
2847 DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 7));
2848
2849 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 0));
2850 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 1));
2851 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 2));
2852 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 3));
2853 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 4));
2854 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 5));
2855 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 6));
2856 DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 7));
2857 }
2858 if (dss_has_feature(FEAT_ATTR2))
2859 DUMPREG(DISPC_OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
2860
2861 DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO1));
2862 DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO2));
2677 2863
2678 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 2864 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
2679#undef DUMPREG 2865#undef DUMPREG
@@ -3388,11 +3574,12 @@ int dispc_setup_plane(enum omap_plane plane,
3388 bool ilace, 3574 bool ilace,
3389 enum omap_dss_rotation_type rotation_type, 3575 enum omap_dss_rotation_type rotation_type,
3390 u8 rotation, bool mirror, u8 global_alpha, 3576 u8 rotation, bool mirror, u8 global_alpha,
3391 u8 pre_mult_alpha, enum omap_channel channel) 3577 u8 pre_mult_alpha, enum omap_channel channel,
3578 u32 puv_addr)
3392{ 3579{
3393 int r = 0; 3580 int r = 0;
3394 3581
3395 DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d,%d, %dx%d -> " 3582 DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d, %d, %dx%d -> "
3396 "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n", 3583 "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n",
3397 plane, paddr, screen_width, pos_x, pos_y, 3584 plane, paddr, screen_width, pos_x, pos_y,
3398 width, height, 3585 width, height,
@@ -3411,7 +3598,8 @@ int dispc_setup_plane(enum omap_plane plane,
3411 rotation_type, 3598 rotation_type,
3412 rotation, mirror, 3599 rotation, mirror,
3413 global_alpha, 3600 global_alpha,
3414 pre_mult_alpha, channel); 3601 pre_mult_alpha,
3602 channel, puv_addr);
3415 3603
3416 enable_clocks(0); 3604 enable_clocks(0);
3417 3605
diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/omap2/dss/dispc.h
new file mode 100644
index 000000000000..6c9ee0a0efb3
--- /dev/null
+++ b/drivers/video/omap2/dss/dispc.h
@@ -0,0 +1,691 @@
1/*
2 * linux/drivers/video/omap2/dss/dispc.h
3 *
4 * Copyright (C) 2011 Texas Instruments
5 * Author: Archit Taneja <archit@ti.com>
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published by
10 * the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#ifndef __OMAP2_DISPC_REG_H
22#define __OMAP2_DISPC_REG_H
23
24/* DISPC common registers */
25#define DISPC_REVISION 0x0000
26#define DISPC_SYSCONFIG 0x0010
27#define DISPC_SYSSTATUS 0x0014
28#define DISPC_IRQSTATUS 0x0018
29#define DISPC_IRQENABLE 0x001C
30#define DISPC_CONTROL 0x0040
31#define DISPC_CONFIG 0x0044
32#define DISPC_CAPABLE 0x0048
33#define DISPC_LINE_STATUS 0x005C
34#define DISPC_LINE_NUMBER 0x0060
35#define DISPC_GLOBAL_ALPHA 0x0074
36#define DISPC_CONTROL2 0x0238
37#define DISPC_CONFIG2 0x0620
38#define DISPC_DIVISOR 0x0804
39
40/* DISPC overlay registers */
41#define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \
42 DISPC_BA0_OFFSET(n))
43#define DISPC_OVL_BA1(n) (DISPC_OVL_BASE(n) + \
44 DISPC_BA1_OFFSET(n))
45#define DISPC_OVL_BA0_UV(n) (DISPC_OVL_BASE(n) + \
46 DISPC_BA0_UV_OFFSET(n))
47#define DISPC_OVL_BA1_UV(n) (DISPC_OVL_BASE(n) + \
48 DISPC_BA1_UV_OFFSET(n))
49#define DISPC_OVL_POSITION(n) (DISPC_OVL_BASE(n) + \
50 DISPC_POS_OFFSET(n))
51#define DISPC_OVL_SIZE(n) (DISPC_OVL_BASE(n) + \
52 DISPC_SIZE_OFFSET(n))
53#define DISPC_OVL_ATTRIBUTES(n) (DISPC_OVL_BASE(n) + \
54 DISPC_ATTR_OFFSET(n))
55#define DISPC_OVL_ATTRIBUTES2(n) (DISPC_OVL_BASE(n) + \
56 DISPC_ATTR2_OFFSET(n))
57#define DISPC_OVL_FIFO_THRESHOLD(n) (DISPC_OVL_BASE(n) + \
58 DISPC_FIFO_THRESH_OFFSET(n))
59#define DISPC_OVL_FIFO_SIZE_STATUS(n) (DISPC_OVL_BASE(n) + \
60 DISPC_FIFO_SIZE_STATUS_OFFSET(n))
61#define DISPC_OVL_ROW_INC(n) (DISPC_OVL_BASE(n) + \
62 DISPC_ROW_INC_OFFSET(n))
63#define DISPC_OVL_PIXEL_INC(n) (DISPC_OVL_BASE(n) + \
64 DISPC_PIX_INC_OFFSET(n))
65#define DISPC_OVL_WINDOW_SKIP(n) (DISPC_OVL_BASE(n) + \
66 DISPC_WINDOW_SKIP_OFFSET(n))
67#define DISPC_OVL_TABLE_BA(n) (DISPC_OVL_BASE(n) + \
68 DISPC_TABLE_BA_OFFSET(n))
69#define DISPC_OVL_FIR(n) (DISPC_OVL_BASE(n) + \
70 DISPC_FIR_OFFSET(n))
71#define DISPC_OVL_FIR2(n) (DISPC_OVL_BASE(n) + \
72 DISPC_FIR2_OFFSET(n))
73#define DISPC_OVL_PICTURE_SIZE(n) (DISPC_OVL_BASE(n) + \
74 DISPC_PIC_SIZE_OFFSET(n))
75#define DISPC_OVL_ACCU0(n) (DISPC_OVL_BASE(n) + \
76 DISPC_ACCU0_OFFSET(n))
77#define DISPC_OVL_ACCU1(n) (DISPC_OVL_BASE(n) + \
78 DISPC_ACCU1_OFFSET(n))
79#define DISPC_OVL_ACCU2_0(n) (DISPC_OVL_BASE(n) + \
80 DISPC_ACCU2_0_OFFSET(n))
81#define DISPC_OVL_ACCU2_1(n) (DISPC_OVL_BASE(n) + \
82 DISPC_ACCU2_1_OFFSET(n))
83#define DISPC_OVL_FIR_COEF_H(n, i) (DISPC_OVL_BASE(n) + \
84 DISPC_FIR_COEF_H_OFFSET(n, i))
85#define DISPC_OVL_FIR_COEF_HV(n, i) (DISPC_OVL_BASE(n) + \
86 DISPC_FIR_COEF_HV_OFFSET(n, i))
87#define DISPC_OVL_FIR_COEF_H2(n, i) (DISPC_OVL_BASE(n) + \
88 DISPC_FIR_COEF_H2_OFFSET(n, i))
89#define DISPC_OVL_FIR_COEF_HV2(n, i) (DISPC_OVL_BASE(n) + \
90 DISPC_FIR_COEF_HV2_OFFSET(n, i))
91#define DISPC_OVL_CONV_COEF(n, i) (DISPC_OVL_BASE(n) + \
92 DISPC_CONV_COEF_OFFSET(n, i))
93#define DISPC_OVL_FIR_COEF_V(n, i) (DISPC_OVL_BASE(n) + \
94 DISPC_FIR_COEF_V_OFFSET(n, i))
95#define DISPC_OVL_FIR_COEF_V2(n, i) (DISPC_OVL_BASE(n) + \
96 DISPC_FIR_COEF_V2_OFFSET(n, i))
97#define DISPC_OVL_PRELOAD(n) (DISPC_OVL_BASE(n) + \
98 DISPC_PRELOAD_OFFSET(n))
99
100/* DISPC manager/channel specific registers */
101static inline u16 DISPC_DEFAULT_COLOR(enum omap_channel channel)
102{
103 switch (channel) {
104 case OMAP_DSS_CHANNEL_LCD:
105 return 0x004C;
106 case OMAP_DSS_CHANNEL_DIGIT:
107 return 0x0050;
108 case OMAP_DSS_CHANNEL_LCD2:
109 return 0x03AC;
110 default:
111 BUG();
112 }
113}
114
115static inline u16 DISPC_TRANS_COLOR(enum omap_channel channel)
116{
117 switch (channel) {
118 case OMAP_DSS_CHANNEL_LCD:
119 return 0x0054;
120 case OMAP_DSS_CHANNEL_DIGIT:
121 return 0x0058;
122 case OMAP_DSS_CHANNEL_LCD2:
123 return 0x03B0;
124 default:
125 BUG();
126 }
127}
128
129static inline u16 DISPC_TIMING_H(enum omap_channel channel)
130{
131 switch (channel) {
132 case OMAP_DSS_CHANNEL_LCD:
133 return 0x0064;
134 case OMAP_DSS_CHANNEL_DIGIT:
135 BUG();
136 case OMAP_DSS_CHANNEL_LCD2:
137 return 0x0400;
138 default:
139 BUG();
140 }
141}
142
143static inline u16 DISPC_TIMING_V(enum omap_channel channel)
144{
145 switch (channel) {
146 case OMAP_DSS_CHANNEL_LCD:
147 return 0x0068;
148 case OMAP_DSS_CHANNEL_DIGIT:
149 BUG();
150 case OMAP_DSS_CHANNEL_LCD2:
151 return 0x0404;
152 default:
153 BUG();
154 }
155}
156
157static inline u16 DISPC_POL_FREQ(enum omap_channel channel)
158{
159 switch (channel) {
160 case OMAP_DSS_CHANNEL_LCD:
161 return 0x006C;
162 case OMAP_DSS_CHANNEL_DIGIT:
163 BUG();
164 case OMAP_DSS_CHANNEL_LCD2:
165 return 0x0408;
166 default:
167 BUG();
168 }
169}
170
171static inline u16 DISPC_DIVISORo(enum omap_channel channel)
172{
173 switch (channel) {
174 case OMAP_DSS_CHANNEL_LCD:
175 return 0x0070;
176 case OMAP_DSS_CHANNEL_DIGIT:
177 BUG();
178 case OMAP_DSS_CHANNEL_LCD2:
179 return 0x040C;
180 default:
181 BUG();
182 }
183}
184
185/* Named as DISPC_SIZE_LCD, DISPC_SIZE_DIGIT and DISPC_SIZE_LCD2 in TRM */
186static inline u16 DISPC_SIZE_MGR(enum omap_channel channel)
187{
188 switch (channel) {
189 case OMAP_DSS_CHANNEL_LCD:
190 return 0x007C;
191 case OMAP_DSS_CHANNEL_DIGIT:
192 return 0x0078;
193 case OMAP_DSS_CHANNEL_LCD2:
194 return 0x03CC;
195 default:
196 BUG();
197 }
198}
199
200static inline u16 DISPC_DATA_CYCLE1(enum omap_channel channel)
201{
202 switch (channel) {
203 case OMAP_DSS_CHANNEL_LCD:
204 return 0x01D4;
205 case OMAP_DSS_CHANNEL_DIGIT:
206 BUG();
207 case OMAP_DSS_CHANNEL_LCD2:
208 return 0x03C0;
209 default:
210 BUG();
211 }
212}
213
214static inline u16 DISPC_DATA_CYCLE2(enum omap_channel channel)
215{
216 switch (channel) {
217 case OMAP_DSS_CHANNEL_LCD:
218 return 0x01D8;
219 case OMAP_DSS_CHANNEL_DIGIT:
220 BUG();
221 case OMAP_DSS_CHANNEL_LCD2:
222 return 0x03C4;
223 default:
224 BUG();
225 }
226}
227
228static inline u16 DISPC_DATA_CYCLE3(enum omap_channel channel)
229{
230 switch (channel) {
231 case OMAP_DSS_CHANNEL_LCD:
232 return 0x01DC;
233 case OMAP_DSS_CHANNEL_DIGIT:
234 BUG();
235 case OMAP_DSS_CHANNEL_LCD2:
236 return 0x03C8;
237 default:
238 BUG();
239 }
240}
241
242static inline u16 DISPC_CPR_COEF_R(enum omap_channel channel)
243{
244 switch (channel) {
245 case OMAP_DSS_CHANNEL_LCD:
246 return 0x0220;
247 case OMAP_DSS_CHANNEL_DIGIT:
248 BUG();
249 case OMAP_DSS_CHANNEL_LCD2:
250 return 0x03BC;
251 default:
252 BUG();
253 }
254}
255
256static inline u16 DISPC_CPR_COEF_G(enum omap_channel channel)
257{
258 switch (channel) {
259 case OMAP_DSS_CHANNEL_LCD:
260 return 0x0224;
261 case OMAP_DSS_CHANNEL_DIGIT:
262 BUG();
263 case OMAP_DSS_CHANNEL_LCD2:
264 return 0x03B8;
265 default:
266 BUG();
267 }
268}
269
270static inline u16 DISPC_CPR_COEF_B(enum omap_channel channel)
271{
272 switch (channel) {
273 case OMAP_DSS_CHANNEL_LCD:
274 return 0x0228;
275 case OMAP_DSS_CHANNEL_DIGIT:
276 BUG();
277 case OMAP_DSS_CHANNEL_LCD2:
278 return 0x03B4;
279 default:
280 BUG();
281 }
282}
283
284/* DISPC overlay register base addresses */
285static inline u16 DISPC_OVL_BASE(enum omap_plane plane)
286{
287 switch (plane) {
288 case OMAP_DSS_GFX:
289 return 0x0080;
290 case OMAP_DSS_VIDEO1:
291 return 0x00BC;
292 case OMAP_DSS_VIDEO2:
293 return 0x014C;
294 default:
295 BUG();
296 }
297}
298
299/* DISPC overlay register offsets */
300static inline u16 DISPC_BA0_OFFSET(enum omap_plane plane)
301{
302 switch (plane) {
303 case OMAP_DSS_GFX:
304 case OMAP_DSS_VIDEO1:
305 case OMAP_DSS_VIDEO2:
306 return 0x0000;
307 default:
308 BUG();
309 }
310}
311
312static inline u16 DISPC_BA1_OFFSET(enum omap_plane plane)
313{
314 switch (plane) {
315 case OMAP_DSS_GFX:
316 case OMAP_DSS_VIDEO1:
317 case OMAP_DSS_VIDEO2:
318 return 0x0004;
319 default:
320 BUG();
321 }
322}
323
324static inline u16 DISPC_BA0_UV_OFFSET(enum omap_plane plane)
325{
326 switch (plane) {
327 case OMAP_DSS_GFX:
328 BUG();
329 case OMAP_DSS_VIDEO1:
330 return 0x0544;
331 case OMAP_DSS_VIDEO2:
332 return 0x04BC;
333 default:
334 BUG();
335 }
336}
337
338static inline u16 DISPC_BA1_UV_OFFSET(enum omap_plane plane)
339{
340 switch (plane) {
341 case OMAP_DSS_GFX:
342 BUG();
343 case OMAP_DSS_VIDEO1:
344 return 0x0548;
345 case OMAP_DSS_VIDEO2:
346 return 0x04C0;
347 default:
348 BUG();
349 }
350}
351
352static inline u16 DISPC_POS_OFFSET(enum omap_plane plane)
353{
354 switch (plane) {
355 case OMAP_DSS_GFX:
356 case OMAP_DSS_VIDEO1:
357 case OMAP_DSS_VIDEO2:
358 return 0x0008;
359 default:
360 BUG();
361 }
362}
363
364static inline u16 DISPC_SIZE_OFFSET(enum omap_plane plane)
365{
366 switch (plane) {
367 case OMAP_DSS_GFX:
368 case OMAP_DSS_VIDEO1:
369 case OMAP_DSS_VIDEO2:
370 return 0x000C;
371 default:
372 BUG();
373 }
374}
375
376static inline u16 DISPC_ATTR_OFFSET(enum omap_plane plane)
377{
378 switch (plane) {
379 case OMAP_DSS_GFX:
380 return 0x0020;
381 case OMAP_DSS_VIDEO1:
382 case OMAP_DSS_VIDEO2:
383 return 0x0010;
384 default:
385 BUG();
386 }
387}
388
389static inline u16 DISPC_ATTR2_OFFSET(enum omap_plane plane)
390{
391 switch (plane) {
392 case OMAP_DSS_GFX:
393 BUG();
394 case OMAP_DSS_VIDEO1:
395 return 0x0568;
396 case OMAP_DSS_VIDEO2:
397 return 0x04DC;
398 default:
399 BUG();
400 }
401}
402
403static inline u16 DISPC_FIFO_THRESH_OFFSET(enum omap_plane plane)
404{
405 switch (plane) {
406 case OMAP_DSS_GFX:
407 return 0x0024;
408 case OMAP_DSS_VIDEO1:
409 case OMAP_DSS_VIDEO2:
410 return 0x0014;
411 default:
412 BUG();
413 }
414}
415
416static inline u16 DISPC_FIFO_SIZE_STATUS_OFFSET(enum omap_plane plane)
417{
418 switch (plane) {
419 case OMAP_DSS_GFX:
420 return 0x0028;
421 case OMAP_DSS_VIDEO1:
422 case OMAP_DSS_VIDEO2:
423 return 0x0018;
424 default:
425 BUG();
426 }
427}
428
429static inline u16 DISPC_ROW_INC_OFFSET(enum omap_plane plane)
430{
431 switch (plane) {
432 case OMAP_DSS_GFX:
433 return 0x002C;
434 case OMAP_DSS_VIDEO1:
435 case OMAP_DSS_VIDEO2:
436 return 0x001C;
437 default:
438 BUG();
439 }
440}
441
442static inline u16 DISPC_PIX_INC_OFFSET(enum omap_plane plane)
443{
444 switch (plane) {
445 case OMAP_DSS_GFX:
446 return 0x0030;
447 case OMAP_DSS_VIDEO1:
448 case OMAP_DSS_VIDEO2:
449 return 0x0020;
450 default:
451 BUG();
452 }
453}
454
455static inline u16 DISPC_WINDOW_SKIP_OFFSET(enum omap_plane plane)
456{
457 switch (plane) {
458 case OMAP_DSS_GFX:
459 return 0x0034;
460 case OMAP_DSS_VIDEO1:
461 case OMAP_DSS_VIDEO2:
462 BUG();
463 default:
464 BUG();
465 }
466}
467
468static inline u16 DISPC_TABLE_BA_OFFSET(enum omap_plane plane)
469{
470 switch (plane) {
471 case OMAP_DSS_GFX:
472 return 0x0038;
473 case OMAP_DSS_VIDEO1:
474 case OMAP_DSS_VIDEO2:
475 BUG();
476 default:
477 BUG();
478 }
479}
480
481static inline u16 DISPC_FIR_OFFSET(enum omap_plane plane)
482{
483 switch (plane) {
484 case OMAP_DSS_GFX:
485 BUG();
486 case OMAP_DSS_VIDEO1:
487 case OMAP_DSS_VIDEO2:
488 return 0x0024;
489 default:
490 BUG();
491 }
492}
493
494static inline u16 DISPC_FIR2_OFFSET(enum omap_plane plane)
495{
496 switch (plane) {
497 case OMAP_DSS_GFX:
498 BUG();
499 case OMAP_DSS_VIDEO1:
500 return 0x0580;
501 case OMAP_DSS_VIDEO2:
502 return 0x055C;
503 default:
504 BUG();
505 }
506}
507
508static inline u16 DISPC_PIC_SIZE_OFFSET(enum omap_plane plane)
509{
510 switch (plane) {
511 case OMAP_DSS_GFX:
512 BUG();
513 case OMAP_DSS_VIDEO1:
514 case OMAP_DSS_VIDEO2:
515 return 0x0028;
516 default:
517 BUG();
518 }
519}
520
521
522static inline u16 DISPC_ACCU0_OFFSET(enum omap_plane plane)
523{
524 switch (plane) {
525 case OMAP_DSS_GFX:
526 BUG();
527 case OMAP_DSS_VIDEO1:
528 case OMAP_DSS_VIDEO2:
529 return 0x002C;
530 default:
531 BUG();
532 }
533}
534
535static inline u16 DISPC_ACCU2_0_OFFSET(enum omap_plane plane)
536{
537 switch (plane) {
538 case OMAP_DSS_GFX:
539 BUG();
540 case OMAP_DSS_VIDEO1:
541 return 0x0584;
542 case OMAP_DSS_VIDEO2:
543 return 0x0560;
544 default:
545 BUG();
546 }
547}
548
549static inline u16 DISPC_ACCU1_OFFSET(enum omap_plane plane)
550{
551 switch (plane) {
552 case OMAP_DSS_GFX:
553 BUG();
554 case OMAP_DSS_VIDEO1:
555 case OMAP_DSS_VIDEO2:
556 return 0x0030;
557 default:
558 BUG();
559 }
560}
561
562static inline u16 DISPC_ACCU2_1_OFFSET(enum omap_plane plane)
563{
564 switch (plane) {
565 case OMAP_DSS_GFX:
566 BUG();
567 case OMAP_DSS_VIDEO1:
568 return 0x0588;
569 case OMAP_DSS_VIDEO2:
570 return 0x0564;
571 default:
572 BUG();
573 }
574}
575
576/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
577static inline u16 DISPC_FIR_COEF_H_OFFSET(enum omap_plane plane, u16 i)
578{
579 switch (plane) {
580 case OMAP_DSS_GFX:
581 BUG();
582 case OMAP_DSS_VIDEO1:
583 case OMAP_DSS_VIDEO2:
584 return 0x0034 + i * 0x8;
585 default:
586 BUG();
587 }
588}
589
590/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
591static inline u16 DISPC_FIR_COEF_H2_OFFSET(enum omap_plane plane, u16 i)
592{
593 switch (plane) {
594 case OMAP_DSS_GFX:
595 BUG();
596 case OMAP_DSS_VIDEO1:
597 return 0x058C + i * 0x8;
598 case OMAP_DSS_VIDEO2:
599 return 0x0568 + i * 0x8;
600 default:
601 BUG();
602 }
603}
604
605/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
606static inline u16 DISPC_FIR_COEF_HV_OFFSET(enum omap_plane plane, u16 i)
607{
608 switch (plane) {
609 case OMAP_DSS_GFX:
610 BUG();
611 case OMAP_DSS_VIDEO1:
612 case OMAP_DSS_VIDEO2:
613 return 0x0038 + i * 0x8;
614 default:
615 BUG();
616 }
617}
618
619/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
620static inline u16 DISPC_FIR_COEF_HV2_OFFSET(enum omap_plane plane, u16 i)
621{
622 switch (plane) {
623 case OMAP_DSS_GFX:
624 BUG();
625 case OMAP_DSS_VIDEO1:
626 return 0x0590 + i * 8;
627 case OMAP_DSS_VIDEO2:
628 return 0x056C + i * 0x8;
629 default:
630 BUG();
631 }
632}
633
634/* coef index i = {0, 1, 2, 3, 4,} */
635static inline u16 DISPC_CONV_COEF_OFFSET(enum omap_plane plane, u16 i)
636{
637 switch (plane) {
638 case OMAP_DSS_GFX:
639 BUG();
640 case OMAP_DSS_VIDEO1:
641 case OMAP_DSS_VIDEO2:
642 return 0x0074 + i * 0x4;
643 default:
644 BUG();
645 }
646}
647
648/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
649static inline u16 DISPC_FIR_COEF_V_OFFSET(enum omap_plane plane, u16 i)
650{
651 switch (plane) {
652 case OMAP_DSS_GFX:
653 BUG();
654 case OMAP_DSS_VIDEO1:
655 return 0x0124 + i * 0x4;
656 case OMAP_DSS_VIDEO2:
657 return 0x00B4 + i * 0x4;
658 default:
659 BUG();
660 }
661}
662
663/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
664static inline u16 DISPC_FIR_COEF_V2_OFFSET(enum omap_plane plane, u16 i)
665{
666 switch (plane) {
667 case OMAP_DSS_GFX:
668 BUG();
669 case OMAP_DSS_VIDEO1:
670 return 0x05CC + i * 0x4;
671 case OMAP_DSS_VIDEO2:
672 return 0x05A8 + i * 0x4;
673 default:
674 BUG();
675 }
676}
677
678static inline u16 DISPC_PRELOAD_OFFSET(enum omap_plane plane)
679{
680 switch (plane) {
681 case OMAP_DSS_GFX:
682 return 0x01AC;
683 case OMAP_DSS_VIDEO1:
684 return 0x0174;
685 case OMAP_DSS_VIDEO2:
686 return 0x00E8;
687 default:
688 BUG();
689 }
690}
691#endif
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index a85a6f38b40c..c2dfc8c50057 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -27,7 +27,7 @@
27#include <linux/jiffies.h> 27#include <linux/jiffies.h>
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29 29
30#include <plat/display.h> 30#include <video/omapdss.h>
31#include "dss.h" 31#include "dss.h"
32 32
33static ssize_t display_enabled_show(struct device *dev, 33static ssize_t display_enabled_show(struct device *dev,
@@ -44,9 +44,13 @@ static ssize_t display_enabled_store(struct device *dev,
44 const char *buf, size_t size) 44 const char *buf, size_t size)
45{ 45{
46 struct omap_dss_device *dssdev = to_dss_device(dev); 46 struct omap_dss_device *dssdev = to_dss_device(dev);
47 bool enabled, r; 47 int r, enabled;
48 48
49 enabled = simple_strtoul(buf, NULL, 10); 49 r = kstrtoint(buf, 0, &enabled);
50 if (r)
51 return r;
52
53 enabled = !!enabled;
50 54
51 if (enabled != (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)) { 55 if (enabled != (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)) {
52 if (enabled) { 56 if (enabled) {
@@ -82,7 +86,9 @@ static ssize_t display_upd_mode_store(struct device *dev,
82 if (!dssdev->driver->set_update_mode) 86 if (!dssdev->driver->set_update_mode)
83 return -EINVAL; 87 return -EINVAL;
84 88
85 val = simple_strtoul(buf, NULL, 10); 89 r = kstrtoint(buf, 0, &val);
90 if (r)
91 return r;
86 92
87 switch (val) { 93 switch (val) {
88 case OMAP_DSS_UPDATE_DISABLED: 94 case OMAP_DSS_UPDATE_DISABLED:
@@ -114,13 +120,16 @@ static ssize_t display_tear_store(struct device *dev,
114 struct device_attribute *attr, const char *buf, size_t size) 120 struct device_attribute *attr, const char *buf, size_t size)
115{ 121{
116 struct omap_dss_device *dssdev = to_dss_device(dev); 122 struct omap_dss_device *dssdev = to_dss_device(dev);
117 unsigned long te; 123 int te, r;
118 int r;
119 124
120 if (!dssdev->driver->enable_te || !dssdev->driver->get_te) 125 if (!dssdev->driver->enable_te || !dssdev->driver->get_te)
121 return -ENOENT; 126 return -ENOENT;
122 127
123 te = simple_strtoul(buf, NULL, 0); 128 r = kstrtoint(buf, 0, &te);
129 if (r)
130 return r;
131
132 te = !!te;
124 133
125 r = dssdev->driver->enable_te(dssdev, te); 134 r = dssdev->driver->enable_te(dssdev, te);
126 if (r) 135 if (r)
@@ -196,13 +205,14 @@ static ssize_t display_rotate_store(struct device *dev,
196 struct device_attribute *attr, const char *buf, size_t size) 205 struct device_attribute *attr, const char *buf, size_t size)
197{ 206{
198 struct omap_dss_device *dssdev = to_dss_device(dev); 207 struct omap_dss_device *dssdev = to_dss_device(dev);
199 unsigned long rot; 208 int rot, r;
200 int r;
201 209
202 if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate) 210 if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
203 return -ENOENT; 211 return -ENOENT;
204 212
205 rot = simple_strtoul(buf, NULL, 0); 213 r = kstrtoint(buf, 0, &rot);
214 if (r)
215 return r;
206 216
207 r = dssdev->driver->set_rotate(dssdev, rot); 217 r = dssdev->driver->set_rotate(dssdev, rot);
208 if (r) 218 if (r)
@@ -226,13 +236,16 @@ static ssize_t display_mirror_store(struct device *dev,
226 struct device_attribute *attr, const char *buf, size_t size) 236 struct device_attribute *attr, const char *buf, size_t size)
227{ 237{
228 struct omap_dss_device *dssdev = to_dss_device(dev); 238 struct omap_dss_device *dssdev = to_dss_device(dev);
229 unsigned long mirror; 239 int mirror, r;
230 int r;
231 240
232 if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror) 241 if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
233 return -ENOENT; 242 return -ENOENT;
234 243
235 mirror = simple_strtoul(buf, NULL, 0); 244 r = kstrtoint(buf, 0, &mirror);
245 if (r)
246 return r;
247
248 mirror = !!mirror;
236 249
237 r = dssdev->driver->set_mirror(dssdev, mirror); 250 r = dssdev->driver->set_mirror(dssdev, mirror);
238 if (r) 251 if (r)
@@ -259,14 +272,15 @@ static ssize_t display_wss_store(struct device *dev,
259 struct device_attribute *attr, const char *buf, size_t size) 272 struct device_attribute *attr, const char *buf, size_t size)
260{ 273{
261 struct omap_dss_device *dssdev = to_dss_device(dev); 274 struct omap_dss_device *dssdev = to_dss_device(dev);
262 unsigned long wss; 275 u32 wss;
263 int r; 276 int r;
264 277
265 if (!dssdev->driver->get_wss || !dssdev->driver->set_wss) 278 if (!dssdev->driver->get_wss || !dssdev->driver->set_wss)
266 return -ENOENT; 279 return -ENOENT;
267 280
268 if (strict_strtoul(buf, 0, &wss)) 281 r = kstrtou32(buf, 0, &wss);
269 return -EINVAL; 282 if (r)
283 return r;
270 284
271 if (wss > 0xfffff) 285 if (wss > 0xfffff)
272 return -EINVAL; 286 return -EINVAL;
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 2d3ca4ca4a05..ff6bd30132df 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -30,16 +30,40 @@
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31#include <linux/regulator/consumer.h> 31#include <linux/regulator/consumer.h>
32 32
33#include <plat/display.h> 33#include <video/omapdss.h>
34#include <plat/cpu.h> 34#include <plat/cpu.h>
35 35
36#include "dss.h" 36#include "dss.h"
37 37
38static struct { 38static struct {
39 struct regulator *vdds_dsi_reg; 39 struct regulator *vdds_dsi_reg;
40 struct platform_device *dsidev;
40} dpi; 41} dpi;
41 42
42#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL 43static struct platform_device *dpi_get_dsidev(enum omap_dss_clk_source clk)
44{
45 int dsi_module;
46
47 dsi_module = clk == OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC ? 0 : 1;
48
49 return dsi_get_dsidev_from_id(dsi_module);
50}
51
52static bool dpi_use_dsi_pll(struct omap_dss_device *dssdev)
53{
54 if (dssdev->clocks.dispc.dispc_fclk_src ==
55 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC ||
56 dssdev->clocks.dispc.dispc_fclk_src ==
57 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC ||
58 dssdev->clocks.dispc.channel.lcd_clk_src ==
59 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC ||
60 dssdev->clocks.dispc.channel.lcd_clk_src ==
61 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC)
62 return true;
63 else
64 return false;
65}
66
43static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft, 67static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft,
44 unsigned long pck_req, unsigned long *fck, int *lck_div, 68 unsigned long pck_req, unsigned long *fck, int *lck_div,
45 int *pck_div) 69 int *pck_div)
@@ -48,16 +72,16 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft,
48 struct dispc_clock_info dispc_cinfo; 72 struct dispc_clock_info dispc_cinfo;
49 int r; 73 int r;
50 74
51 r = dsi_pll_calc_clock_div_pck(is_tft, pck_req, &dsi_cinfo, 75 r = dsi_pll_calc_clock_div_pck(dpi.dsidev, is_tft, pck_req,
52 &dispc_cinfo); 76 &dsi_cinfo, &dispc_cinfo);
53 if (r) 77 if (r)
54 return r; 78 return r;
55 79
56 r = dsi_pll_set_clock_div(&dsi_cinfo); 80 r = dsi_pll_set_clock_div(dpi.dsidev, &dsi_cinfo);
57 if (r) 81 if (r)
58 return r; 82 return r;
59 83
60 dss_select_dispc_clk_source(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC); 84 dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
61 85
62 r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo); 86 r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
63 if (r) 87 if (r)
@@ -69,7 +93,7 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft,
69 93
70 return 0; 94 return 0;
71} 95}
72#else 96
73static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft, 97static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft,
74 unsigned long pck_req, unsigned long *fck, int *lck_div, 98 unsigned long pck_req, unsigned long *fck, int *lck_div,
75 int *pck_div) 99 int *pck_div)
@@ -96,13 +120,12 @@ static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft,
96 120
97 return 0; 121 return 0;
98} 122}
99#endif
100 123
101static int dpi_set_mode(struct omap_dss_device *dssdev) 124static int dpi_set_mode(struct omap_dss_device *dssdev)
102{ 125{
103 struct omap_video_timings *t = &dssdev->panel.timings; 126 struct omap_video_timings *t = &dssdev->panel.timings;
104 int lck_div, pck_div; 127 int lck_div = 0, pck_div = 0;
105 unsigned long fck; 128 unsigned long fck = 0;
106 unsigned long pck; 129 unsigned long pck;
107 bool is_tft; 130 bool is_tft;
108 int r = 0; 131 int r = 0;
@@ -114,13 +137,12 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
114 137
115 is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0; 138 is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
116 139
117#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL 140 if (dpi_use_dsi_pll(dssdev))
118 r = dpi_set_dsi_clk(dssdev, is_tft, t->pixel_clock * 1000, &fck, 141 r = dpi_set_dsi_clk(dssdev, is_tft, t->pixel_clock * 1000,
119 &lck_div, &pck_div); 142 &fck, &lck_div, &pck_div);
120#else 143 else
121 r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000, &fck, 144 r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000,
122 &lck_div, &pck_div); 145 &fck, &lck_div, &pck_div);
123#endif
124 if (r) 146 if (r)
125 goto err0; 147 goto err0;
126 148
@@ -179,12 +201,13 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
179 if (r) 201 if (r)
180 goto err2; 202 goto err2;
181 203
182#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL 204 if (dpi_use_dsi_pll(dssdev)) {
183 dss_clk_enable(DSS_CLK_SYSCK); 205 dss_clk_enable(DSS_CLK_SYSCK);
184 r = dsi_pll_init(dssdev, 0, 1); 206 r = dsi_pll_init(dpi.dsidev, 0, 1);
185 if (r) 207 if (r)
186 goto err3; 208 goto err3;
187#endif 209 }
210
188 r = dpi_set_mode(dssdev); 211 r = dpi_set_mode(dssdev);
189 if (r) 212 if (r)
190 goto err4; 213 goto err4;
@@ -196,11 +219,11 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
196 return 0; 219 return 0;
197 220
198err4: 221err4:
199#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL 222 if (dpi_use_dsi_pll(dssdev))
200 dsi_pll_uninit(); 223 dsi_pll_uninit(dpi.dsidev, true);
201err3: 224err3:
202 dss_clk_disable(DSS_CLK_SYSCK); 225 if (dpi_use_dsi_pll(dssdev))
203#endif 226 dss_clk_disable(DSS_CLK_SYSCK);
204err2: 227err2:
205 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 228 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
206 if (cpu_is_omap34xx()) 229 if (cpu_is_omap34xx())
@@ -216,11 +239,11 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
216{ 239{
217 dssdev->manager->disable(dssdev->manager); 240 dssdev->manager->disable(dssdev->manager);
218 241
219#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL 242 if (dpi_use_dsi_pll(dssdev)) {
220 dss_select_dispc_clk_source(DSS_CLK_SRC_FCK); 243 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
221 dsi_pll_uninit(); 244 dsi_pll_uninit(dpi.dsidev, true);
222 dss_clk_disable(DSS_CLK_SYSCK); 245 dss_clk_disable(DSS_CLK_SYSCK);
223#endif 246 }
224 247
225 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 248 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
226 249
@@ -251,6 +274,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
251 int lck_div, pck_div; 274 int lck_div, pck_div;
252 unsigned long fck; 275 unsigned long fck;
253 unsigned long pck; 276 unsigned long pck;
277 struct dispc_clock_info dispc_cinfo;
254 278
255 if (!dispc_lcd_timings_ok(timings)) 279 if (!dispc_lcd_timings_ok(timings))
256 return -EINVAL; 280 return -EINVAL;
@@ -260,11 +284,9 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
260 284
261 is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0; 285 is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
262 286
263#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL 287 if (dpi_use_dsi_pll(dssdev)) {
264 {
265 struct dsi_clock_info dsi_cinfo; 288 struct dsi_clock_info dsi_cinfo;
266 struct dispc_clock_info dispc_cinfo; 289 r = dsi_pll_calc_clock_div_pck(dpi.dsidev, is_tft,
267 r = dsi_pll_calc_clock_div_pck(is_tft,
268 timings->pixel_clock * 1000, 290 timings->pixel_clock * 1000,
269 &dsi_cinfo, &dispc_cinfo); 291 &dsi_cinfo, &dispc_cinfo);
270 292
@@ -272,13 +294,8 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
272 return r; 294 return r;
273 295
274 fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk; 296 fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
275 lck_div = dispc_cinfo.lck_div; 297 } else {
276 pck_div = dispc_cinfo.pck_div;
277 }
278#else
279 {
280 struct dss_clock_info dss_cinfo; 298 struct dss_clock_info dss_cinfo;
281 struct dispc_clock_info dispc_cinfo;
282 r = dss_calc_clock_div(is_tft, timings->pixel_clock * 1000, 299 r = dss_calc_clock_div(is_tft, timings->pixel_clock * 1000,
283 &dss_cinfo, &dispc_cinfo); 300 &dss_cinfo, &dispc_cinfo);
284 301
@@ -286,10 +303,10 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
286 return r; 303 return r;
287 304
288 fck = dss_cinfo.fck; 305 fck = dss_cinfo.fck;
289 lck_div = dispc_cinfo.lck_div;
290 pck_div = dispc_cinfo.pck_div;
291 } 306 }
292#endif 307
308 lck_div = dispc_cinfo.lck_div;
309 pck_div = dispc_cinfo.pck_div;
293 310
294 pck = fck / lck_div / pck_div / 1000; 311 pck = fck / lck_div / pck_div / 1000;
295 312
@@ -316,6 +333,12 @@ int dpi_init_display(struct omap_dss_device *dssdev)
316 dpi.vdds_dsi_reg = vdds_dsi; 333 dpi.vdds_dsi_reg = vdds_dsi;
317 } 334 }
318 335
336 if (dpi_use_dsi_pll(dssdev)) {
337 enum omap_dss_clk_source dispc_fclk_src =
338 dssdev->clocks.dispc.dispc_fclk_src;
339 dpi.dsidev = dpi_get_dsidev(dispc_fclk_src);
340 }
341
319 return 0; 342 return 0;
320} 343}
321 344
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 0a7f1a47f8e3..345757cfcbee 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -33,8 +33,11 @@
33#include <linux/regulator/consumer.h> 33#include <linux/regulator/consumer.h>
34#include <linux/wait.h> 34#include <linux/wait.h>
35#include <linux/workqueue.h> 35#include <linux/workqueue.h>
36#include <linux/sched.h>
37#include <linux/slab.h>
38#include <linux/debugfs.h>
36 39
37#include <plat/display.h> 40#include <video/omapdss.h>
38#include <plat/clock.h> 41#include <plat/clock.h>
39 42
40#include "dss.h" 43#include "dss.h"
@@ -56,6 +59,7 @@ struct dsi_reg { u16 idx; };
56#define DSI_IRQSTATUS DSI_REG(0x0018) 59#define DSI_IRQSTATUS DSI_REG(0x0018)
57#define DSI_IRQENABLE DSI_REG(0x001C) 60#define DSI_IRQENABLE DSI_REG(0x001C)
58#define DSI_CTRL DSI_REG(0x0040) 61#define DSI_CTRL DSI_REG(0x0040)
62#define DSI_GNQ DSI_REG(0x0044)
59#define DSI_COMPLEXIO_CFG1 DSI_REG(0x0048) 63#define DSI_COMPLEXIO_CFG1 DSI_REG(0x0048)
60#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(0x004C) 64#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(0x004C)
61#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(0x0050) 65#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(0x0050)
@@ -90,6 +94,7 @@ struct dsi_reg { u16 idx; };
90#define DSI_DSIPHY_CFG1 DSI_REG(0x200 + 0x0004) 94#define DSI_DSIPHY_CFG1 DSI_REG(0x200 + 0x0004)
91#define DSI_DSIPHY_CFG2 DSI_REG(0x200 + 0x0008) 95#define DSI_DSIPHY_CFG2 DSI_REG(0x200 + 0x0008)
92#define DSI_DSIPHY_CFG5 DSI_REG(0x200 + 0x0014) 96#define DSI_DSIPHY_CFG5 DSI_REG(0x200 + 0x0014)
97#define DSI_DSIPHY_CFG10 DSI_REG(0x200 + 0x0028)
93 98
94/* DSI_PLL_CTRL_SCP */ 99/* DSI_PLL_CTRL_SCP */
95 100
@@ -99,11 +104,11 @@ struct dsi_reg { u16 idx; };
99#define DSI_PLL_CONFIGURATION1 DSI_REG(0x300 + 0x000C) 104#define DSI_PLL_CONFIGURATION1 DSI_REG(0x300 + 0x000C)
100#define DSI_PLL_CONFIGURATION2 DSI_REG(0x300 + 0x0010) 105#define DSI_PLL_CONFIGURATION2 DSI_REG(0x300 + 0x0010)
101 106
102#define REG_GET(idx, start, end) \ 107#define REG_GET(dsidev, idx, start, end) \
103 FLD_GET(dsi_read_reg(idx), start, end) 108 FLD_GET(dsi_read_reg(dsidev, idx), start, end)
104 109
105#define REG_FLD_MOD(idx, val, start, end) \ 110#define REG_FLD_MOD(dsidev, idx, val, start, end) \
106 dsi_write_reg(idx, FLD_MOD(dsi_read_reg(idx), val, start, end)) 111 dsi_write_reg(dsidev, idx, FLD_MOD(dsi_read_reg(dsidev, idx), val, start, end))
107 112
108/* Global interrupts */ 113/* Global interrupts */
109#define DSI_IRQ_VC0 (1 << 0) 114#define DSI_IRQ_VC0 (1 << 0)
@@ -147,31 +152,50 @@ struct dsi_reg { u16 idx; };
147#define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0) 152#define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0)
148#define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1) 153#define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1)
149#define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2) 154#define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2)
155#define DSI_CIO_IRQ_ERRSYNCESC4 (1 << 3)
156#define DSI_CIO_IRQ_ERRSYNCESC5 (1 << 4)
150#define DSI_CIO_IRQ_ERRESC1 (1 << 5) 157#define DSI_CIO_IRQ_ERRESC1 (1 << 5)
151#define DSI_CIO_IRQ_ERRESC2 (1 << 6) 158#define DSI_CIO_IRQ_ERRESC2 (1 << 6)
152#define DSI_CIO_IRQ_ERRESC3 (1 << 7) 159#define DSI_CIO_IRQ_ERRESC3 (1 << 7)
160#define DSI_CIO_IRQ_ERRESC4 (1 << 8)
161#define DSI_CIO_IRQ_ERRESC5 (1 << 9)
153#define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10) 162#define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10)
154#define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11) 163#define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11)
155#define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12) 164#define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12)
165#define DSI_CIO_IRQ_ERRCONTROL4 (1 << 13)
166#define DSI_CIO_IRQ_ERRCONTROL5 (1 << 14)
156#define DSI_CIO_IRQ_STATEULPS1 (1 << 15) 167#define DSI_CIO_IRQ_STATEULPS1 (1 << 15)
157#define DSI_CIO_IRQ_STATEULPS2 (1 << 16) 168#define DSI_CIO_IRQ_STATEULPS2 (1 << 16)
158#define DSI_CIO_IRQ_STATEULPS3 (1 << 17) 169#define DSI_CIO_IRQ_STATEULPS3 (1 << 17)
170#define DSI_CIO_IRQ_STATEULPS4 (1 << 18)
171#define DSI_CIO_IRQ_STATEULPS5 (1 << 19)
159#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20) 172#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20)
160#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21) 173#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21)
161#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22) 174#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22)
162#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23) 175#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23)
163#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24) 176#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24)
164#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25) 177#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25)
178#define DSI_CIO_IRQ_ERRCONTENTIONLP0_4 (1 << 26)
179#define DSI_CIO_IRQ_ERRCONTENTIONLP1_4 (1 << 27)
180#define DSI_CIO_IRQ_ERRCONTENTIONLP0_5 (1 << 28)
181#define DSI_CIO_IRQ_ERRCONTENTIONLP1_5 (1 << 29)
165#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30) 182#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30)
166#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31) 183#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31)
167#define DSI_CIO_IRQ_ERROR_MASK \ 184#define DSI_CIO_IRQ_ERROR_MASK \
168 (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \ 185 (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
169 DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \ 186 DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \
170 DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRCONTROL1 | \ 187 DSI_CIO_IRQ_ERRSYNCESC5 | \
171 DSI_CIO_IRQ_ERRCONTROL2 | DSI_CIO_IRQ_ERRCONTROL3 | \ 188 DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
189 DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \
190 DSI_CIO_IRQ_ERRESC5 | \
191 DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \
192 DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \
193 DSI_CIO_IRQ_ERRCONTROL5 | \
172 DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \ 194 DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
173 DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \ 195 DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
174 DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3) 196 DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \
197 DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
198 DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
175 199
176#define DSI_DT_DCS_SHORT_WRITE_0 0x05 200#define DSI_DT_DCS_SHORT_WRITE_0 0x05
177#define DSI_DT_DCS_SHORT_WRITE_1 0x15 201#define DSI_DT_DCS_SHORT_WRITE_1 0x15
@@ -208,6 +232,19 @@ enum dsi_vc_mode {
208 DSI_VC_MODE_VP, 232 DSI_VC_MODE_VP,
209}; 233};
210 234
235enum dsi_lane {
236 DSI_CLK_P = 1 << 0,
237 DSI_CLK_N = 1 << 1,
238 DSI_DATA1_P = 1 << 2,
239 DSI_DATA1_N = 1 << 3,
240 DSI_DATA2_P = 1 << 4,
241 DSI_DATA2_N = 1 << 5,
242 DSI_DATA3_P = 1 << 6,
243 DSI_DATA3_N = 1 << 7,
244 DSI_DATA4_P = 1 << 8,
245 DSI_DATA4_N = 1 << 9,
246};
247
211struct dsi_update_region { 248struct dsi_update_region {
212 u16 x, y, w, h; 249 u16 x, y, w, h;
213 struct omap_dss_device *device; 250 struct omap_dss_device *device;
@@ -227,14 +264,16 @@ struct dsi_isr_tables {
227 struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS]; 264 struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
228}; 265};
229 266
230static struct 267struct dsi_data {
231{
232 struct platform_device *pdev; 268 struct platform_device *pdev;
233 void __iomem *base; 269 void __iomem *base;
234 int irq; 270 int irq;
235 271
272 void (*dsi_mux_pads)(bool enable);
273
236 struct dsi_clock_info current_cinfo; 274 struct dsi_clock_info current_cinfo;
237 275
276 bool vdds_dsi_enabled;
238 struct regulator *vdds_dsi_reg; 277 struct regulator *vdds_dsi_reg;
239 278
240 struct { 279 struct {
@@ -258,8 +297,7 @@ static struct
258 struct dsi_update_region update_region; 297 struct dsi_update_region update_region;
259 298
260 bool te_enabled; 299 bool te_enabled;
261 300 bool ulps_enabled;
262 struct workqueue_struct *workqueue;
263 301
264 void (*framedone_callback)(int, void *); 302 void (*framedone_callback)(int, void *);
265 void *framedone_data; 303 void *framedone_data;
@@ -292,21 +330,63 @@ static struct
292 unsigned long regm_dispc_max, regm_dsi_max; 330 unsigned long regm_dispc_max, regm_dsi_max;
293 unsigned long fint_min, fint_max; 331 unsigned long fint_min, fint_max;
294 unsigned long lpdiv_max; 332 unsigned long lpdiv_max;
295} dsi; 333
334 int num_data_lanes;
335
336 unsigned scp_clk_refcount;
337};
338
339struct dsi_packet_sent_handler_data {
340 struct platform_device *dsidev;
341 struct completion *completion;
342};
343
344static struct platform_device *dsi_pdev_map[MAX_NUM_DSI];
296 345
297#ifdef DEBUG 346#ifdef DEBUG
298static unsigned int dsi_perf; 347static unsigned int dsi_perf;
299module_param_named(dsi_perf, dsi_perf, bool, 0644); 348module_param_named(dsi_perf, dsi_perf, bool, 0644);
300#endif 349#endif
301 350
302static inline void dsi_write_reg(const struct dsi_reg idx, u32 val) 351static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev)
303{ 352{
304 __raw_writel(val, dsi.base + idx.idx); 353 return dev_get_drvdata(&dsidev->dev);
305} 354}
306 355
307static inline u32 dsi_read_reg(const struct dsi_reg idx) 356static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev)
308{ 357{
309 return __raw_readl(dsi.base + idx.idx); 358 return dsi_pdev_map[dssdev->phy.dsi.module];
359}
360
361struct platform_device *dsi_get_dsidev_from_id(int module)
362{
363 return dsi_pdev_map[module];
364}
365
366static int dsi_get_dsidev_id(struct platform_device *dsidev)
367{
368 /* TEMP: Pass 0 as the dsi module index till the time the dsi platform
369 * device names aren't changed to the form "omapdss_dsi.0",
370 * "omapdss_dsi.1" and so on */
371 BUG_ON(dsidev->id != -1);
372
373 return 0;
374}
375
376static inline void dsi_write_reg(struct platform_device *dsidev,
377 const struct dsi_reg idx, u32 val)
378{
379 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
380
381 __raw_writel(val, dsi->base + idx.idx);
382}
383
384static inline u32 dsi_read_reg(struct platform_device *dsidev,
385 const struct dsi_reg idx)
386{
387 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
388
389 return __raw_readl(dsi->base + idx.idx);
310} 390}
311 391
312 392
@@ -318,21 +398,29 @@ void dsi_restore_context(void)
318{ 398{
319} 399}
320 400
321void dsi_bus_lock(void) 401void dsi_bus_lock(struct omap_dss_device *dssdev)
322{ 402{
323 down(&dsi.bus_lock); 403 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
404 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
405
406 down(&dsi->bus_lock);
324} 407}
325EXPORT_SYMBOL(dsi_bus_lock); 408EXPORT_SYMBOL(dsi_bus_lock);
326 409
327void dsi_bus_unlock(void) 410void dsi_bus_unlock(struct omap_dss_device *dssdev)
328{ 411{
329 up(&dsi.bus_lock); 412 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
413 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
414
415 up(&dsi->bus_lock);
330} 416}
331EXPORT_SYMBOL(dsi_bus_unlock); 417EXPORT_SYMBOL(dsi_bus_unlock);
332 418
333static bool dsi_bus_is_locked(void) 419static bool dsi_bus_is_locked(struct platform_device *dsidev)
334{ 420{
335 return dsi.bus_lock.count == 0; 421 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
422
423 return dsi->bus_lock.count == 0;
336} 424}
337 425
338static void dsi_completion_handler(void *data, u32 mask) 426static void dsi_completion_handler(void *data, u32 mask)
@@ -340,12 +428,12 @@ static void dsi_completion_handler(void *data, u32 mask)
340 complete((struct completion *)data); 428 complete((struct completion *)data);
341} 429}
342 430
343static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum, 431static inline int wait_for_bit_change(struct platform_device *dsidev,
344 int value) 432 const struct dsi_reg idx, int bitnum, int value)
345{ 433{
346 int t = 100000; 434 int t = 100000;
347 435
348 while (REG_GET(idx, bitnum, bitnum) != value) { 436 while (REG_GET(dsidev, idx, bitnum, bitnum) != value) {
349 if (--t == 0) 437 if (--t == 0)
350 return !value; 438 return !value;
351 } 439 }
@@ -354,18 +442,21 @@ static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum,
354} 442}
355 443
356#ifdef DEBUG 444#ifdef DEBUG
357static void dsi_perf_mark_setup(void) 445static void dsi_perf_mark_setup(struct platform_device *dsidev)
358{ 446{
359 dsi.perf_setup_time = ktime_get(); 447 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
448 dsi->perf_setup_time = ktime_get();
360} 449}
361 450
362static void dsi_perf_mark_start(void) 451static void dsi_perf_mark_start(struct platform_device *dsidev)
363{ 452{
364 dsi.perf_start_time = ktime_get(); 453 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
454 dsi->perf_start_time = ktime_get();
365} 455}
366 456
367static void dsi_perf_show(const char *name) 457static void dsi_perf_show(struct platform_device *dsidev, const char *name)
368{ 458{
459 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
369 ktime_t t, setup_time, trans_time; 460 ktime_t t, setup_time, trans_time;
370 u32 total_bytes; 461 u32 total_bytes;
371 u32 setup_us, trans_us, total_us; 462 u32 setup_us, trans_us, total_us;
@@ -375,21 +466,21 @@ static void dsi_perf_show(const char *name)
375 466
376 t = ktime_get(); 467 t = ktime_get();
377 468
378 setup_time = ktime_sub(dsi.perf_start_time, dsi.perf_setup_time); 469 setup_time = ktime_sub(dsi->perf_start_time, dsi->perf_setup_time);
379 setup_us = (u32)ktime_to_us(setup_time); 470 setup_us = (u32)ktime_to_us(setup_time);
380 if (setup_us == 0) 471 if (setup_us == 0)
381 setup_us = 1; 472 setup_us = 1;
382 473
383 trans_time = ktime_sub(t, dsi.perf_start_time); 474 trans_time = ktime_sub(t, dsi->perf_start_time);
384 trans_us = (u32)ktime_to_us(trans_time); 475 trans_us = (u32)ktime_to_us(trans_time);
385 if (trans_us == 0) 476 if (trans_us == 0)
386 trans_us = 1; 477 trans_us = 1;
387 478
388 total_us = setup_us + trans_us; 479 total_us = setup_us + trans_us;
389 480
390 total_bytes = dsi.update_region.w * 481 total_bytes = dsi->update_region.w *
391 dsi.update_region.h * 482 dsi->update_region.h *
392 dsi.update_region.device->ctrl.pixel_size / 8; 483 dsi->update_region.device->ctrl.pixel_size / 8;
393 484
394 printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), " 485 printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
395 "%u bytes, %u kbytes/sec\n", 486 "%u bytes, %u kbytes/sec\n",
@@ -402,9 +493,9 @@ static void dsi_perf_show(const char *name)
402 total_bytes * 1000 / total_us); 493 total_bytes * 1000 / total_us);
403} 494}
404#else 495#else
405#define dsi_perf_mark_setup() 496#define dsi_perf_mark_setup(x)
406#define dsi_perf_mark_start() 497#define dsi_perf_mark_start(x)
407#define dsi_perf_show(x) 498#define dsi_perf_show(x, y)
408#endif 499#endif
409 500
410static void print_irq_status(u32 status) 501static void print_irq_status(u32 status)
@@ -510,38 +601,42 @@ static void print_irq_status_cio(u32 status)
510} 601}
511 602
512#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 603#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
513static void dsi_collect_irq_stats(u32 irqstatus, u32 *vcstatus, u32 ciostatus) 604static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus,
605 u32 *vcstatus, u32 ciostatus)
514{ 606{
607 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
515 int i; 608 int i;
516 609
517 spin_lock(&dsi.irq_stats_lock); 610 spin_lock(&dsi->irq_stats_lock);
518 611
519 dsi.irq_stats.irq_count++; 612 dsi->irq_stats.irq_count++;
520 dss_collect_irq_stats(irqstatus, dsi.irq_stats.dsi_irqs); 613 dss_collect_irq_stats(irqstatus, dsi->irq_stats.dsi_irqs);
521 614
522 for (i = 0; i < 4; ++i) 615 for (i = 0; i < 4; ++i)
523 dss_collect_irq_stats(vcstatus[i], dsi.irq_stats.vc_irqs[i]); 616 dss_collect_irq_stats(vcstatus[i], dsi->irq_stats.vc_irqs[i]);
524 617
525 dss_collect_irq_stats(ciostatus, dsi.irq_stats.cio_irqs); 618 dss_collect_irq_stats(ciostatus, dsi->irq_stats.cio_irqs);
526 619
527 spin_unlock(&dsi.irq_stats_lock); 620 spin_unlock(&dsi->irq_stats_lock);
528} 621}
529#else 622#else
530#define dsi_collect_irq_stats(irqstatus, vcstatus, ciostatus) 623#define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus)
531#endif 624#endif
532 625
533static int debug_irq; 626static int debug_irq;
534 627
535static void dsi_handle_irq_errors(u32 irqstatus, u32 *vcstatus, u32 ciostatus) 628static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus,
629 u32 *vcstatus, u32 ciostatus)
536{ 630{
631 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
537 int i; 632 int i;
538 633
539 if (irqstatus & DSI_IRQ_ERROR_MASK) { 634 if (irqstatus & DSI_IRQ_ERROR_MASK) {
540 DSSERR("DSI error, irqstatus %x\n", irqstatus); 635 DSSERR("DSI error, irqstatus %x\n", irqstatus);
541 print_irq_status(irqstatus); 636 print_irq_status(irqstatus);
542 spin_lock(&dsi.errors_lock); 637 spin_lock(&dsi->errors_lock);
543 dsi.errors |= irqstatus & DSI_IRQ_ERROR_MASK; 638 dsi->errors |= irqstatus & DSI_IRQ_ERROR_MASK;
544 spin_unlock(&dsi.errors_lock); 639 spin_unlock(&dsi->errors_lock);
545 } else if (debug_irq) { 640 } else if (debug_irq) {
546 print_irq_status(irqstatus); 641 print_irq_status(irqstatus);
547 } 642 }
@@ -602,22 +697,27 @@ static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
602 697
603static irqreturn_t omap_dsi_irq_handler(int irq, void *arg) 698static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
604{ 699{
700 struct platform_device *dsidev;
701 struct dsi_data *dsi;
605 u32 irqstatus, vcstatus[4], ciostatus; 702 u32 irqstatus, vcstatus[4], ciostatus;
606 int i; 703 int i;
607 704
608 spin_lock(&dsi.irq_lock); 705 dsidev = (struct platform_device *) arg;
706 dsi = dsi_get_dsidrv_data(dsidev);
707
708 spin_lock(&dsi->irq_lock);
609 709
610 irqstatus = dsi_read_reg(DSI_IRQSTATUS); 710 irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS);
611 711
612 /* IRQ is not for us */ 712 /* IRQ is not for us */
613 if (!irqstatus) { 713 if (!irqstatus) {
614 spin_unlock(&dsi.irq_lock); 714 spin_unlock(&dsi->irq_lock);
615 return IRQ_NONE; 715 return IRQ_NONE;
616 } 716 }
617 717
618 dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK); 718 dsi_write_reg(dsidev, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
619 /* flush posted write */ 719 /* flush posted write */
620 dsi_read_reg(DSI_IRQSTATUS); 720 dsi_read_reg(dsidev, DSI_IRQSTATUS);
621 721
622 for (i = 0; i < 4; ++i) { 722 for (i = 0; i < 4; ++i) {
623 if ((irqstatus & (1 << i)) == 0) { 723 if ((irqstatus & (1 << i)) == 0) {
@@ -625,45 +725,47 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
625 continue; 725 continue;
626 } 726 }
627 727
628 vcstatus[i] = dsi_read_reg(DSI_VC_IRQSTATUS(i)); 728 vcstatus[i] = dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
629 729
630 dsi_write_reg(DSI_VC_IRQSTATUS(i), vcstatus[i]); 730 dsi_write_reg(dsidev, DSI_VC_IRQSTATUS(i), vcstatus[i]);
631 /* flush posted write */ 731 /* flush posted write */
632 dsi_read_reg(DSI_VC_IRQSTATUS(i)); 732 dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
633 } 733 }
634 734
635 if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) { 735 if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
636 ciostatus = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS); 736 ciostatus = dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
637 737
638 dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, ciostatus); 738 dsi_write_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
639 /* flush posted write */ 739 /* flush posted write */
640 dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS); 740 dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
641 } else { 741 } else {
642 ciostatus = 0; 742 ciostatus = 0;
643 } 743 }
644 744
645#ifdef DSI_CATCH_MISSING_TE 745#ifdef DSI_CATCH_MISSING_TE
646 if (irqstatus & DSI_IRQ_TE_TRIGGER) 746 if (irqstatus & DSI_IRQ_TE_TRIGGER)
647 del_timer(&dsi.te_timer); 747 del_timer(&dsi->te_timer);
648#endif 748#endif
649 749
650 /* make a copy and unlock, so that isrs can unregister 750 /* make a copy and unlock, so that isrs can unregister
651 * themselves */ 751 * themselves */
652 memcpy(&dsi.isr_tables_copy, &dsi.isr_tables, sizeof(dsi.isr_tables)); 752 memcpy(&dsi->isr_tables_copy, &dsi->isr_tables,
753 sizeof(dsi->isr_tables));
653 754
654 spin_unlock(&dsi.irq_lock); 755 spin_unlock(&dsi->irq_lock);
655 756
656 dsi_handle_isrs(&dsi.isr_tables_copy, irqstatus, vcstatus, ciostatus); 757 dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus);
657 758
658 dsi_handle_irq_errors(irqstatus, vcstatus, ciostatus); 759 dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus);
659 760
660 dsi_collect_irq_stats(irqstatus, vcstatus, ciostatus); 761 dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus);
661 762
662 return IRQ_HANDLED; 763 return IRQ_HANDLED;
663} 764}
664 765
665/* dsi.irq_lock has to be locked by the caller */ 766/* dsi->irq_lock has to be locked by the caller */
666static void _omap_dsi_configure_irqs(struct dsi_isr_data *isr_array, 767static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
768 struct dsi_isr_data *isr_array,
667 unsigned isr_array_size, u32 default_mask, 769 unsigned isr_array_size, u32 default_mask,
668 const struct dsi_reg enable_reg, 770 const struct dsi_reg enable_reg,
669 const struct dsi_reg status_reg) 771 const struct dsi_reg status_reg)
@@ -684,61 +786,67 @@ static void _omap_dsi_configure_irqs(struct dsi_isr_data *isr_array,
684 mask |= isr_data->mask; 786 mask |= isr_data->mask;
685 } 787 }
686 788
687 old_mask = dsi_read_reg(enable_reg); 789 old_mask = dsi_read_reg(dsidev, enable_reg);
688 /* clear the irqstatus for newly enabled irqs */ 790 /* clear the irqstatus for newly enabled irqs */
689 dsi_write_reg(status_reg, (mask ^ old_mask) & mask); 791 dsi_write_reg(dsidev, status_reg, (mask ^ old_mask) & mask);
690 dsi_write_reg(enable_reg, mask); 792 dsi_write_reg(dsidev, enable_reg, mask);
691 793
692 /* flush posted writes */ 794 /* flush posted writes */
693 dsi_read_reg(enable_reg); 795 dsi_read_reg(dsidev, enable_reg);
694 dsi_read_reg(status_reg); 796 dsi_read_reg(dsidev, status_reg);
695} 797}
696 798
697/* dsi.irq_lock has to be locked by the caller */ 799/* dsi->irq_lock has to be locked by the caller */
698static void _omap_dsi_set_irqs(void) 800static void _omap_dsi_set_irqs(struct platform_device *dsidev)
699{ 801{
802 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
700 u32 mask = DSI_IRQ_ERROR_MASK; 803 u32 mask = DSI_IRQ_ERROR_MASK;
701#ifdef DSI_CATCH_MISSING_TE 804#ifdef DSI_CATCH_MISSING_TE
702 mask |= DSI_IRQ_TE_TRIGGER; 805 mask |= DSI_IRQ_TE_TRIGGER;
703#endif 806#endif
704 _omap_dsi_configure_irqs(dsi.isr_tables.isr_table, 807 _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table,
705 ARRAY_SIZE(dsi.isr_tables.isr_table), mask, 808 ARRAY_SIZE(dsi->isr_tables.isr_table), mask,
706 DSI_IRQENABLE, DSI_IRQSTATUS); 809 DSI_IRQENABLE, DSI_IRQSTATUS);
707} 810}
708 811
709/* dsi.irq_lock has to be locked by the caller */ 812/* dsi->irq_lock has to be locked by the caller */
710static void _omap_dsi_set_irqs_vc(int vc) 813static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc)
711{ 814{
712 _omap_dsi_configure_irqs(dsi.isr_tables.isr_table_vc[vc], 815 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
713 ARRAY_SIZE(dsi.isr_tables.isr_table_vc[vc]), 816
817 _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_vc[vc],
818 ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]),
714 DSI_VC_IRQ_ERROR_MASK, 819 DSI_VC_IRQ_ERROR_MASK,
715 DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc)); 820 DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc));
716} 821}
717 822
718/* dsi.irq_lock has to be locked by the caller */ 823/* dsi->irq_lock has to be locked by the caller */
719static void _omap_dsi_set_irqs_cio(void) 824static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev)
720{ 825{
721 _omap_dsi_configure_irqs(dsi.isr_tables.isr_table_cio, 826 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
722 ARRAY_SIZE(dsi.isr_tables.isr_table_cio), 827
828 _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_cio,
829 ARRAY_SIZE(dsi->isr_tables.isr_table_cio),
723 DSI_CIO_IRQ_ERROR_MASK, 830 DSI_CIO_IRQ_ERROR_MASK,
724 DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS); 831 DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS);
725} 832}
726 833
727static void _dsi_initialize_irq(void) 834static void _dsi_initialize_irq(struct platform_device *dsidev)
728{ 835{
836 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
729 unsigned long flags; 837 unsigned long flags;
730 int vc; 838 int vc;
731 839
732 spin_lock_irqsave(&dsi.irq_lock, flags); 840 spin_lock_irqsave(&dsi->irq_lock, flags);
733 841
734 memset(&dsi.isr_tables, 0, sizeof(dsi.isr_tables)); 842 memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables));
735 843
736 _omap_dsi_set_irqs(); 844 _omap_dsi_set_irqs(dsidev);
737 for (vc = 0; vc < 4; ++vc) 845 for (vc = 0; vc < 4; ++vc)
738 _omap_dsi_set_irqs_vc(vc); 846 _omap_dsi_set_irqs_vc(dsidev, vc);
739 _omap_dsi_set_irqs_cio(); 847 _omap_dsi_set_irqs_cio(dsidev);
740 848
741 spin_unlock_irqrestore(&dsi.irq_lock, flags); 849 spin_unlock_irqrestore(&dsi->irq_lock, flags);
742} 850}
743 851
744static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask, 852static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
@@ -797,126 +905,137 @@ static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
797 return -EINVAL; 905 return -EINVAL;
798} 906}
799 907
800static int dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask) 908static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr,
909 void *arg, u32 mask)
801{ 910{
911 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
802 unsigned long flags; 912 unsigned long flags;
803 int r; 913 int r;
804 914
805 spin_lock_irqsave(&dsi.irq_lock, flags); 915 spin_lock_irqsave(&dsi->irq_lock, flags);
806 916
807 r = _dsi_register_isr(isr, arg, mask, dsi.isr_tables.isr_table, 917 r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table,
808 ARRAY_SIZE(dsi.isr_tables.isr_table)); 918 ARRAY_SIZE(dsi->isr_tables.isr_table));
809 919
810 if (r == 0) 920 if (r == 0)
811 _omap_dsi_set_irqs(); 921 _omap_dsi_set_irqs(dsidev);
812 922
813 spin_unlock_irqrestore(&dsi.irq_lock, flags); 923 spin_unlock_irqrestore(&dsi->irq_lock, flags);
814 924
815 return r; 925 return r;
816} 926}
817 927
818static int dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask) 928static int dsi_unregister_isr(struct platform_device *dsidev,
929 omap_dsi_isr_t isr, void *arg, u32 mask)
819{ 930{
931 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
820 unsigned long flags; 932 unsigned long flags;
821 int r; 933 int r;
822 934
823 spin_lock_irqsave(&dsi.irq_lock, flags); 935 spin_lock_irqsave(&dsi->irq_lock, flags);
824 936
825 r = _dsi_unregister_isr(isr, arg, mask, dsi.isr_tables.isr_table, 937 r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table,
826 ARRAY_SIZE(dsi.isr_tables.isr_table)); 938 ARRAY_SIZE(dsi->isr_tables.isr_table));
827 939
828 if (r == 0) 940 if (r == 0)
829 _omap_dsi_set_irqs(); 941 _omap_dsi_set_irqs(dsidev);
830 942
831 spin_unlock_irqrestore(&dsi.irq_lock, flags); 943 spin_unlock_irqrestore(&dsi->irq_lock, flags);
832 944
833 return r; 945 return r;
834} 946}
835 947
836static int dsi_register_isr_vc(int channel, omap_dsi_isr_t isr, void *arg, 948static int dsi_register_isr_vc(struct platform_device *dsidev, int channel,
837 u32 mask) 949 omap_dsi_isr_t isr, void *arg, u32 mask)
838{ 950{
951 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
839 unsigned long flags; 952 unsigned long flags;
840 int r; 953 int r;
841 954
842 spin_lock_irqsave(&dsi.irq_lock, flags); 955 spin_lock_irqsave(&dsi->irq_lock, flags);
843 956
844 r = _dsi_register_isr(isr, arg, mask, 957 r = _dsi_register_isr(isr, arg, mask,
845 dsi.isr_tables.isr_table_vc[channel], 958 dsi->isr_tables.isr_table_vc[channel],
846 ARRAY_SIZE(dsi.isr_tables.isr_table_vc[channel])); 959 ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
847 960
848 if (r == 0) 961 if (r == 0)
849 _omap_dsi_set_irqs_vc(channel); 962 _omap_dsi_set_irqs_vc(dsidev, channel);
850 963
851 spin_unlock_irqrestore(&dsi.irq_lock, flags); 964 spin_unlock_irqrestore(&dsi->irq_lock, flags);
852 965
853 return r; 966 return r;
854} 967}
855 968
856static int dsi_unregister_isr_vc(int channel, omap_dsi_isr_t isr, void *arg, 969static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel,
857 u32 mask) 970 omap_dsi_isr_t isr, void *arg, u32 mask)
858{ 971{
972 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
859 unsigned long flags; 973 unsigned long flags;
860 int r; 974 int r;
861 975
862 spin_lock_irqsave(&dsi.irq_lock, flags); 976 spin_lock_irqsave(&dsi->irq_lock, flags);
863 977
864 r = _dsi_unregister_isr(isr, arg, mask, 978 r = _dsi_unregister_isr(isr, arg, mask,
865 dsi.isr_tables.isr_table_vc[channel], 979 dsi->isr_tables.isr_table_vc[channel],
866 ARRAY_SIZE(dsi.isr_tables.isr_table_vc[channel])); 980 ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
867 981
868 if (r == 0) 982 if (r == 0)
869 _omap_dsi_set_irqs_vc(channel); 983 _omap_dsi_set_irqs_vc(dsidev, channel);
870 984
871 spin_unlock_irqrestore(&dsi.irq_lock, flags); 985 spin_unlock_irqrestore(&dsi->irq_lock, flags);
872 986
873 return r; 987 return r;
874} 988}
875 989
876static int dsi_register_isr_cio(omap_dsi_isr_t isr, void *arg, u32 mask) 990static int dsi_register_isr_cio(struct platform_device *dsidev,
991 omap_dsi_isr_t isr, void *arg, u32 mask)
877{ 992{
993 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
878 unsigned long flags; 994 unsigned long flags;
879 int r; 995 int r;
880 996
881 spin_lock_irqsave(&dsi.irq_lock, flags); 997 spin_lock_irqsave(&dsi->irq_lock, flags);
882 998
883 r = _dsi_register_isr(isr, arg, mask, dsi.isr_tables.isr_table_cio, 999 r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
884 ARRAY_SIZE(dsi.isr_tables.isr_table_cio)); 1000 ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
885 1001
886 if (r == 0) 1002 if (r == 0)
887 _omap_dsi_set_irqs_cio(); 1003 _omap_dsi_set_irqs_cio(dsidev);
888 1004
889 spin_unlock_irqrestore(&dsi.irq_lock, flags); 1005 spin_unlock_irqrestore(&dsi->irq_lock, flags);
890 1006
891 return r; 1007 return r;
892} 1008}
893 1009
894static int dsi_unregister_isr_cio(omap_dsi_isr_t isr, void *arg, u32 mask) 1010static int dsi_unregister_isr_cio(struct platform_device *dsidev,
1011 omap_dsi_isr_t isr, void *arg, u32 mask)
895{ 1012{
1013 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
896 unsigned long flags; 1014 unsigned long flags;
897 int r; 1015 int r;
898 1016
899 spin_lock_irqsave(&dsi.irq_lock, flags); 1017 spin_lock_irqsave(&dsi->irq_lock, flags);
900 1018
901 r = _dsi_unregister_isr(isr, arg, mask, dsi.isr_tables.isr_table_cio, 1019 r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
902 ARRAY_SIZE(dsi.isr_tables.isr_table_cio)); 1020 ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
903 1021
904 if (r == 0) 1022 if (r == 0)
905 _omap_dsi_set_irqs_cio(); 1023 _omap_dsi_set_irqs_cio(dsidev);
906 1024
907 spin_unlock_irqrestore(&dsi.irq_lock, flags); 1025 spin_unlock_irqrestore(&dsi->irq_lock, flags);
908 1026
909 return r; 1027 return r;
910} 1028}
911 1029
912static u32 dsi_get_errors(void) 1030static u32 dsi_get_errors(struct platform_device *dsidev)
913{ 1031{
1032 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
914 unsigned long flags; 1033 unsigned long flags;
915 u32 e; 1034 u32 e;
916 spin_lock_irqsave(&dsi.errors_lock, flags); 1035 spin_lock_irqsave(&dsi->errors_lock, flags);
917 e = dsi.errors; 1036 e = dsi->errors;
918 dsi.errors = 0; 1037 dsi->errors = 0;
919 spin_unlock_irqrestore(&dsi.errors_lock, flags); 1038 spin_unlock_irqrestore(&dsi->errors_lock, flags);
920 return e; 1039 return e;
921} 1040}
922 1041
@@ -930,23 +1049,27 @@ static inline void enable_clocks(bool enable)
930} 1049}
931 1050
932/* source clock for DSI PLL. this could also be PCLKFREE */ 1051/* source clock for DSI PLL. this could also be PCLKFREE */
933static inline void dsi_enable_pll_clock(bool enable) 1052static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
1053 bool enable)
934{ 1054{
1055 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1056
935 if (enable) 1057 if (enable)
936 dss_clk_enable(DSS_CLK_SYSCK); 1058 dss_clk_enable(DSS_CLK_SYSCK);
937 else 1059 else
938 dss_clk_disable(DSS_CLK_SYSCK); 1060 dss_clk_disable(DSS_CLK_SYSCK);
939 1061
940 if (enable && dsi.pll_locked) { 1062 if (enable && dsi->pll_locked) {
941 if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1) 1063 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1)
942 DSSERR("cannot lock PLL when enabling clocks\n"); 1064 DSSERR("cannot lock PLL when enabling clocks\n");
943 } 1065 }
944} 1066}
945 1067
946#ifdef DEBUG 1068#ifdef DEBUG
947static void _dsi_print_reset_status(void) 1069static void _dsi_print_reset_status(struct platform_device *dsidev)
948{ 1070{
949 u32 l; 1071 u32 l;
1072 int b0, b1, b2;
950 1073
951 if (!dss_debug) 1074 if (!dss_debug)
952 return; 1075 return;
@@ -954,35 +1077,47 @@ static void _dsi_print_reset_status(void)
954 /* A dummy read using the SCP interface to any DSIPHY register is 1077 /* A dummy read using the SCP interface to any DSIPHY register is
955 * required after DSIPHY reset to complete the reset of the DSI complex 1078 * required after DSIPHY reset to complete the reset of the DSI complex
956 * I/O. */ 1079 * I/O. */
957 l = dsi_read_reg(DSI_DSIPHY_CFG5); 1080 l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
958 1081
959 printk(KERN_DEBUG "DSI resets: "); 1082 printk(KERN_DEBUG "DSI resets: ");
960 1083
961 l = dsi_read_reg(DSI_PLL_STATUS); 1084 l = dsi_read_reg(dsidev, DSI_PLL_STATUS);
962 printk("PLL (%d) ", FLD_GET(l, 0, 0)); 1085 printk("PLL (%d) ", FLD_GET(l, 0, 0));
963 1086
964 l = dsi_read_reg(DSI_COMPLEXIO_CFG1); 1087 l = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
965 printk("CIO (%d) ", FLD_GET(l, 29, 29)); 1088 printk("CIO (%d) ", FLD_GET(l, 29, 29));
966 1089
967 l = dsi_read_reg(DSI_DSIPHY_CFG5); 1090 if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
968 printk("PHY (%x, %d, %d, %d)\n", 1091 b0 = 28;
969 FLD_GET(l, 28, 26), 1092 b1 = 27;
1093 b2 = 26;
1094 } else {
1095 b0 = 24;
1096 b1 = 25;
1097 b2 = 26;
1098 }
1099
1100 l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
1101 printk("PHY (%x%x%x, %d, %d, %d)\n",
1102 FLD_GET(l, b0, b0),
1103 FLD_GET(l, b1, b1),
1104 FLD_GET(l, b2, b2),
970 FLD_GET(l, 29, 29), 1105 FLD_GET(l, 29, 29),
971 FLD_GET(l, 30, 30), 1106 FLD_GET(l, 30, 30),
972 FLD_GET(l, 31, 31)); 1107 FLD_GET(l, 31, 31));
973} 1108}
974#else 1109#else
975#define _dsi_print_reset_status() 1110#define _dsi_print_reset_status(x)
976#endif 1111#endif
977 1112
978static inline int dsi_if_enable(bool enable) 1113static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
979{ 1114{
980 DSSDBG("dsi_if_enable(%d)\n", enable); 1115 DSSDBG("dsi_if_enable(%d)\n", enable);
981 1116
982 enable = enable ? 1 : 0; 1117 enable = enable ? 1 : 0;
983 REG_FLD_MOD(DSI_CTRL, enable, 0, 0); /* IF_EN */ 1118 REG_FLD_MOD(dsidev, DSI_CTRL, enable, 0, 0); /* IF_EN */
984 1119
985 if (wait_for_bit_change(DSI_CTRL, 0, enable) != enable) { 1120 if (wait_for_bit_change(dsidev, DSI_CTRL, 0, enable) != enable) {
986 DSSERR("Failed to set dsi_if_enable to %d\n", enable); 1121 DSSERR("Failed to set dsi_if_enable to %d\n", enable);
987 return -EIO; 1122 return -EIO;
988 } 1123 }
@@ -990,31 +1125,38 @@ static inline int dsi_if_enable(bool enable)
990 return 0; 1125 return 0;
991} 1126}
992 1127
993unsigned long dsi_get_pll_hsdiv_dispc_rate(void) 1128unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
994{ 1129{
995 return dsi.current_cinfo.dsi_pll_hsdiv_dispc_clk; 1130 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1131
1132 return dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk;
996} 1133}
997 1134
998static unsigned long dsi_get_pll_hsdiv_dsi_rate(void) 1135static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev)
999{ 1136{
1000 return dsi.current_cinfo.dsi_pll_hsdiv_dsi_clk; 1137 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1138
1139 return dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk;
1001} 1140}
1002 1141
1003static unsigned long dsi_get_txbyteclkhs(void) 1142static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
1004{ 1143{
1005 return dsi.current_cinfo.clkin4ddr / 16; 1144 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1145
1146 return dsi->current_cinfo.clkin4ddr / 16;
1006} 1147}
1007 1148
1008static unsigned long dsi_fclk_rate(void) 1149static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
1009{ 1150{
1010 unsigned long r; 1151 unsigned long r;
1152 int dsi_module = dsi_get_dsidev_id(dsidev);
1011 1153
1012 if (dss_get_dsi_clk_source() == DSS_CLK_SRC_FCK) { 1154 if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) {
1013 /* DSI FCLK source is DSS_CLK_FCK */ 1155 /* DSI FCLK source is DSS_CLK_FCK */
1014 r = dss_clk_get_rate(DSS_CLK_FCK); 1156 r = dss_clk_get_rate(DSS_CLK_FCK);
1015 } else { 1157 } else {
1016 /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */ 1158 /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
1017 r = dsi_get_pll_hsdiv_dsi_rate(); 1159 r = dsi_get_pll_hsdiv_dsi_rate(dsidev);
1018 } 1160 }
1019 1161
1020 return r; 1162 return r;
@@ -1022,31 +1164,50 @@ static unsigned long dsi_fclk_rate(void)
1022 1164
1023static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev) 1165static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
1024{ 1166{
1167 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
1168 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1025 unsigned long dsi_fclk; 1169 unsigned long dsi_fclk;
1026 unsigned lp_clk_div; 1170 unsigned lp_clk_div;
1027 unsigned long lp_clk; 1171 unsigned long lp_clk;
1028 1172
1029 lp_clk_div = dssdev->phy.dsi.div.lp_clk_div; 1173 lp_clk_div = dssdev->clocks.dsi.lp_clk_div;
1030 1174
1031 if (lp_clk_div == 0 || lp_clk_div > dsi.lpdiv_max) 1175 if (lp_clk_div == 0 || lp_clk_div > dsi->lpdiv_max)
1032 return -EINVAL; 1176 return -EINVAL;
1033 1177
1034 dsi_fclk = dsi_fclk_rate(); 1178 dsi_fclk = dsi_fclk_rate(dsidev);
1035 1179
1036 lp_clk = dsi_fclk / 2 / lp_clk_div; 1180 lp_clk = dsi_fclk / 2 / lp_clk_div;
1037 1181
1038 DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk); 1182 DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
1039 dsi.current_cinfo.lp_clk = lp_clk; 1183 dsi->current_cinfo.lp_clk = lp_clk;
1040 dsi.current_cinfo.lp_clk_div = lp_clk_div; 1184 dsi->current_cinfo.lp_clk_div = lp_clk_div;
1041 1185
1042 REG_FLD_MOD(DSI_CLK_CTRL, lp_clk_div, 12, 0); /* LP_CLK_DIVISOR */ 1186 /* LP_CLK_DIVISOR */
1187 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0);
1043 1188
1044 REG_FLD_MOD(DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 1189 /* LP_RX_SYNCHRO_ENABLE */
1045 21, 21); /* LP_RX_SYNCHRO_ENABLE */ 1190 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);
1046 1191
1047 return 0; 1192 return 0;
1048} 1193}
1049 1194
1195static void dsi_enable_scp_clk(struct platform_device *dsidev)
1196{
1197 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1198
1199 if (dsi->scp_clk_refcount++ == 0)
1200 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
1201}
1202
1203static void dsi_disable_scp_clk(struct platform_device *dsidev)
1204{
1205 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1206
1207 WARN_ON(dsi->scp_clk_refcount == 0);
1208 if (--dsi->scp_clk_refcount == 0)
1209 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
1210}
1050 1211
1051enum dsi_pll_power_state { 1212enum dsi_pll_power_state {
1052 DSI_PLL_POWER_OFF = 0x0, 1213 DSI_PLL_POWER_OFF = 0x0,
@@ -1055,14 +1216,21 @@ enum dsi_pll_power_state {
1055 DSI_PLL_POWER_ON_DIV = 0x3, 1216 DSI_PLL_POWER_ON_DIV = 0x3,
1056}; 1217};
1057 1218
1058static int dsi_pll_power(enum dsi_pll_power_state state) 1219static int dsi_pll_power(struct platform_device *dsidev,
1220 enum dsi_pll_power_state state)
1059{ 1221{
1060 int t = 0; 1222 int t = 0;
1061 1223
1062 REG_FLD_MOD(DSI_CLK_CTRL, state, 31, 30); /* PLL_PWR_CMD */ 1224 /* DSI-PLL power command 0x3 is not working */
1225 if (dss_has_feature(FEAT_DSI_PLL_PWR_BUG) &&
1226 state == DSI_PLL_POWER_ON_DIV)
1227 state = DSI_PLL_POWER_ON_ALL;
1228
1229 /* PLL_PWR_CMD */
1230 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, state, 31, 30);
1063 1231
1064 /* PLL_PWR_STATUS */ 1232 /* PLL_PWR_STATUS */
1065 while (FLD_GET(dsi_read_reg(DSI_CLK_CTRL), 29, 28) != state) { 1233 while (FLD_GET(dsi_read_reg(dsidev, DSI_CLK_CTRL), 29, 28) != state) {
1066 if (++t > 1000) { 1234 if (++t > 1000) {
1067 DSSERR("Failed to set DSI PLL power mode to %d\n", 1235 DSSERR("Failed to set DSI PLL power mode to %d\n",
1068 state); 1236 state);
@@ -1078,16 +1246,19 @@ static int dsi_pll_power(enum dsi_pll_power_state state)
1078static int dsi_calc_clock_rates(struct omap_dss_device *dssdev, 1246static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
1079 struct dsi_clock_info *cinfo) 1247 struct dsi_clock_info *cinfo)
1080{ 1248{
1081 if (cinfo->regn == 0 || cinfo->regn > dsi.regn_max) 1249 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
1250 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1251
1252 if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max)
1082 return -EINVAL; 1253 return -EINVAL;
1083 1254
1084 if (cinfo->regm == 0 || cinfo->regm > dsi.regm_max) 1255 if (cinfo->regm == 0 || cinfo->regm > dsi->regm_max)
1085 return -EINVAL; 1256 return -EINVAL;
1086 1257
1087 if (cinfo->regm_dispc > dsi.regm_dispc_max) 1258 if (cinfo->regm_dispc > dsi->regm_dispc_max)
1088 return -EINVAL; 1259 return -EINVAL;
1089 1260
1090 if (cinfo->regm_dsi > dsi.regm_dsi_max) 1261 if (cinfo->regm_dsi > dsi->regm_dsi_max)
1091 return -EINVAL; 1262 return -EINVAL;
1092 1263
1093 if (cinfo->use_sys_clk) { 1264 if (cinfo->use_sys_clk) {
@@ -1106,7 +1277,7 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
1106 1277
1107 cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1)); 1278 cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));
1108 1279
1109 if (cinfo->fint > dsi.fint_max || cinfo->fint < dsi.fint_min) 1280 if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min)
1110 return -EINVAL; 1281 return -EINVAL;
1111 1282
1112 cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint; 1283 cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;
@@ -1129,10 +1300,11 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
1129 return 0; 1300 return 0;
1130} 1301}
1131 1302
1132int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck, 1303int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft,
1133 struct dsi_clock_info *dsi_cinfo, 1304 unsigned long req_pck, struct dsi_clock_info *dsi_cinfo,
1134 struct dispc_clock_info *dispc_cinfo) 1305 struct dispc_clock_info *dispc_cinfo)
1135{ 1306{
1307 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1136 struct dsi_clock_info cur, best; 1308 struct dsi_clock_info cur, best;
1137 struct dispc_clock_info best_dispc; 1309 struct dispc_clock_info best_dispc;
1138 int min_fck_per_pck; 1310 int min_fck_per_pck;
@@ -1143,10 +1315,10 @@ int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck,
1143 1315
1144 max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK); 1316 max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
1145 1317
1146 if (req_pck == dsi.cache_req_pck && 1318 if (req_pck == dsi->cache_req_pck &&
1147 dsi.cache_cinfo.clkin == dss_sys_clk) { 1319 dsi->cache_cinfo.clkin == dss_sys_clk) {
1148 DSSDBG("DSI clock info found from cache\n"); 1320 DSSDBG("DSI clock info found from cache\n");
1149 *dsi_cinfo = dsi.cache_cinfo; 1321 *dsi_cinfo = dsi->cache_cinfo;
1150 dispc_find_clk_divs(is_tft, req_pck, 1322 dispc_find_clk_divs(is_tft, req_pck,
1151 dsi_cinfo->dsi_pll_hsdiv_dispc_clk, dispc_cinfo); 1323 dsi_cinfo->dsi_pll_hsdiv_dispc_clk, dispc_cinfo);
1152 return 0; 1324 return 0;
@@ -1176,17 +1348,17 @@ retry:
1176 /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */ 1348 /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
1177 /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */ 1349 /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
1178 /* To reduce PLL lock time, keep Fint high (around 2 MHz) */ 1350 /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
1179 for (cur.regn = 1; cur.regn < dsi.regn_max; ++cur.regn) { 1351 for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
1180 if (cur.highfreq == 0) 1352 if (cur.highfreq == 0)
1181 cur.fint = cur.clkin / cur.regn; 1353 cur.fint = cur.clkin / cur.regn;
1182 else 1354 else
1183 cur.fint = cur.clkin / (2 * cur.regn); 1355 cur.fint = cur.clkin / (2 * cur.regn);
1184 1356
1185 if (cur.fint > dsi.fint_max || cur.fint < dsi.fint_min) 1357 if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
1186 continue; 1358 continue;
1187 1359
1188 /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */ 1360 /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
1189 for (cur.regm = 1; cur.regm < dsi.regm_max; ++cur.regm) { 1361 for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
1190 unsigned long a, b; 1362 unsigned long a, b;
1191 1363
1192 a = 2 * cur.regm * (cur.clkin/1000); 1364 a = 2 * cur.regm * (cur.clkin/1000);
@@ -1198,8 +1370,8 @@ retry:
1198 1370
1199 /* dsi_pll_hsdiv_dispc_clk(MHz) = 1371 /* dsi_pll_hsdiv_dispc_clk(MHz) =
1200 * DSIPHY(MHz) / regm_dispc < 173MHz/186Mhz */ 1372 * DSIPHY(MHz) / regm_dispc < 173MHz/186Mhz */
1201 for (cur.regm_dispc = 1; cur.regm_dispc < dsi.regm_dispc_max; 1373 for (cur.regm_dispc = 1; cur.regm_dispc <
1202 ++cur.regm_dispc) { 1374 dsi->regm_dispc_max; ++cur.regm_dispc) {
1203 struct dispc_clock_info cur_dispc; 1375 struct dispc_clock_info cur_dispc;
1204 cur.dsi_pll_hsdiv_dispc_clk = 1376 cur.dsi_pll_hsdiv_dispc_clk =
1205 cur.clkin4ddr / cur.regm_dispc; 1377 cur.clkin4ddr / cur.regm_dispc;
@@ -1259,34 +1431,39 @@ found:
1259 if (dispc_cinfo) 1431 if (dispc_cinfo)
1260 *dispc_cinfo = best_dispc; 1432 *dispc_cinfo = best_dispc;
1261 1433
1262 dsi.cache_req_pck = req_pck; 1434 dsi->cache_req_pck = req_pck;
1263 dsi.cache_clk_freq = 0; 1435 dsi->cache_clk_freq = 0;
1264 dsi.cache_cinfo = best; 1436 dsi->cache_cinfo = best;
1265 1437
1266 return 0; 1438 return 0;
1267} 1439}
1268 1440
1269int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo) 1441int dsi_pll_set_clock_div(struct platform_device *dsidev,
1442 struct dsi_clock_info *cinfo)
1270{ 1443{
1444 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1271 int r = 0; 1445 int r = 0;
1272 u32 l; 1446 u32 l;
1273 int f; 1447 int f = 0;
1274 u8 regn_start, regn_end, regm_start, regm_end; 1448 u8 regn_start, regn_end, regm_start, regm_end;
1275 u8 regm_dispc_start, regm_dispc_end, regm_dsi_start, regm_dsi_end; 1449 u8 regm_dispc_start, regm_dispc_end, regm_dsi_start, regm_dsi_end;
1276 1450
1277 DSSDBGF(); 1451 DSSDBGF();
1278 1452
1279 dsi.current_cinfo.fint = cinfo->fint; 1453 dsi->current_cinfo.use_sys_clk = cinfo->use_sys_clk;
1280 dsi.current_cinfo.clkin4ddr = cinfo->clkin4ddr; 1454 dsi->current_cinfo.highfreq = cinfo->highfreq;
1281 dsi.current_cinfo.dsi_pll_hsdiv_dispc_clk = 1455
1456 dsi->current_cinfo.fint = cinfo->fint;
1457 dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr;
1458 dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk =
1282 cinfo->dsi_pll_hsdiv_dispc_clk; 1459 cinfo->dsi_pll_hsdiv_dispc_clk;
1283 dsi.current_cinfo.dsi_pll_hsdiv_dsi_clk = 1460 dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk =
1284 cinfo->dsi_pll_hsdiv_dsi_clk; 1461 cinfo->dsi_pll_hsdiv_dsi_clk;
1285 1462
1286 dsi.current_cinfo.regn = cinfo->regn; 1463 dsi->current_cinfo.regn = cinfo->regn;
1287 dsi.current_cinfo.regm = cinfo->regm; 1464 dsi->current_cinfo.regm = cinfo->regm;
1288 dsi.current_cinfo.regm_dispc = cinfo->regm_dispc; 1465 dsi->current_cinfo.regm_dispc = cinfo->regm_dispc;
1289 dsi.current_cinfo.regm_dsi = cinfo->regm_dsi; 1466 dsi->current_cinfo.regm_dsi = cinfo->regm_dsi;
1290 1467
1291 DSSDBG("DSI Fint %ld\n", cinfo->fint); 1468 DSSDBG("DSI Fint %ld\n", cinfo->fint);
1292 1469
@@ -1309,12 +1486,12 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
1309 DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4); 1486 DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4);
1310 1487
1311 DSSDBG("regm_dispc = %d, %s (%s) = %lu\n", cinfo->regm_dispc, 1488 DSSDBG("regm_dispc = %d, %s (%s) = %lu\n", cinfo->regm_dispc,
1312 dss_get_generic_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC), 1489 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
1313 dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC), 1490 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
1314 cinfo->dsi_pll_hsdiv_dispc_clk); 1491 cinfo->dsi_pll_hsdiv_dispc_clk);
1315 DSSDBG("regm_dsi = %d, %s (%s) = %lu\n", cinfo->regm_dsi, 1492 DSSDBG("regm_dsi = %d, %s (%s) = %lu\n", cinfo->regm_dsi,
1316 dss_get_generic_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI), 1493 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
1317 dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI), 1494 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
1318 cinfo->dsi_pll_hsdiv_dsi_clk); 1495 cinfo->dsi_pll_hsdiv_dsi_clk);
1319 1496
1320 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGN, &regn_start, &regn_end); 1497 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGN, &regn_start, &regn_end);
@@ -1324,9 +1501,10 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
1324 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DSI, &regm_dsi_start, 1501 dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DSI, &regm_dsi_start,
1325 &regm_dsi_end); 1502 &regm_dsi_end);
1326 1503
1327 REG_FLD_MOD(DSI_PLL_CONTROL, 0, 0, 0); /* DSI_PLL_AUTOMODE = manual */ 1504 /* DSI_PLL_AUTOMODE = manual */
1505 REG_FLD_MOD(dsidev, DSI_PLL_CONTROL, 0, 0, 0);
1328 1506
1329 l = dsi_read_reg(DSI_PLL_CONFIGURATION1); 1507 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION1);
1330 l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */ 1508 l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */
1331 /* DSI_PLL_REGN */ 1509 /* DSI_PLL_REGN */
1332 l = FLD_MOD(l, cinfo->regn - 1, regn_start, regn_end); 1510 l = FLD_MOD(l, cinfo->regn - 1, regn_start, regn_end);
@@ -1338,22 +1516,22 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
1338 /* DSIPROTO_CLOCK_DIV */ 1516 /* DSIPROTO_CLOCK_DIV */
1339 l = FLD_MOD(l, cinfo->regm_dsi > 0 ? cinfo->regm_dsi - 1 : 0, 1517 l = FLD_MOD(l, cinfo->regm_dsi > 0 ? cinfo->regm_dsi - 1 : 0,
1340 regm_dsi_start, regm_dsi_end); 1518 regm_dsi_start, regm_dsi_end);
1341 dsi_write_reg(DSI_PLL_CONFIGURATION1, l); 1519 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION1, l);
1342 1520
1343 BUG_ON(cinfo->fint < dsi.fint_min || cinfo->fint > dsi.fint_max); 1521 BUG_ON(cinfo->fint < dsi->fint_min || cinfo->fint > dsi->fint_max);
1344 if (cinfo->fint < 1000000) 1522
1345 f = 0x3; 1523 if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) {
1346 else if (cinfo->fint < 1250000) 1524 f = cinfo->fint < 1000000 ? 0x3 :
1347 f = 0x4; 1525 cinfo->fint < 1250000 ? 0x4 :
1348 else if (cinfo->fint < 1500000) 1526 cinfo->fint < 1500000 ? 0x5 :
1349 f = 0x5; 1527 cinfo->fint < 1750000 ? 0x6 :
1350 else if (cinfo->fint < 1750000) 1528 0x7;
1351 f = 0x6; 1529 }
1352 else 1530
1353 f = 0x7; 1531 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
1354 1532
1355 l = dsi_read_reg(DSI_PLL_CONFIGURATION2); 1533 if (dss_has_feature(FEAT_DSI_PLL_FREQSEL))
1356 l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */ 1534 l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
1357 l = FLD_MOD(l, cinfo->use_sys_clk ? 0 : 1, 1535 l = FLD_MOD(l, cinfo->use_sys_clk ? 0 : 1,
1358 11, 11); /* DSI_PLL_CLKSEL */ 1536 11, 11); /* DSI_PLL_CLKSEL */
1359 l = FLD_MOD(l, cinfo->highfreq, 1537 l = FLD_MOD(l, cinfo->highfreq,
@@ -1361,25 +1539,25 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
1361 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */ 1539 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1362 l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */ 1540 l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */
1363 l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */ 1541 l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */
1364 dsi_write_reg(DSI_PLL_CONFIGURATION2, l); 1542 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);
1365 1543
1366 REG_FLD_MOD(DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */ 1544 REG_FLD_MOD(dsidev, DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */
1367 1545
1368 if (wait_for_bit_change(DSI_PLL_GO, 0, 0) != 0) { 1546 if (wait_for_bit_change(dsidev, DSI_PLL_GO, 0, 0) != 0) {
1369 DSSERR("dsi pll go bit not going down.\n"); 1547 DSSERR("dsi pll go bit not going down.\n");
1370 r = -EIO; 1548 r = -EIO;
1371 goto err; 1549 goto err;
1372 } 1550 }
1373 1551
1374 if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1) { 1552 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) {
1375 DSSERR("cannot lock PLL\n"); 1553 DSSERR("cannot lock PLL\n");
1376 r = -EIO; 1554 r = -EIO;
1377 goto err; 1555 goto err;
1378 } 1556 }
1379 1557
1380 dsi.pll_locked = 1; 1558 dsi->pll_locked = 1;
1381 1559
1382 l = dsi_read_reg(DSI_PLL_CONFIGURATION2); 1560 l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
1383 l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */ 1561 l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */
1384 l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */ 1562 l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */
1385 l = FLD_MOD(l, 0, 6, 6); /* DSI_PLL_LOWCURRSTBY */ 1563 l = FLD_MOD(l, 0, 6, 6); /* DSI_PLL_LOWCURRSTBY */
@@ -1394,52 +1572,53 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
1394 l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN */ 1572 l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN */
1395 l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN */ 1573 l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN */
1396 l = FLD_MOD(l, 0, 20, 20); /* DSI_HSDIVBYPASS */ 1574 l = FLD_MOD(l, 0, 20, 20); /* DSI_HSDIVBYPASS */
1397 dsi_write_reg(DSI_PLL_CONFIGURATION2, l); 1575 dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);
1398 1576
1399 DSSDBG("PLL config done\n"); 1577 DSSDBG("PLL config done\n");
1400err: 1578err:
1401 return r; 1579 return r;
1402} 1580}
1403 1581
1404int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk, 1582int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
1405 bool enable_hsdiv) 1583 bool enable_hsdiv)
1406{ 1584{
1585 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1407 int r = 0; 1586 int r = 0;
1408 enum dsi_pll_power_state pwstate; 1587 enum dsi_pll_power_state pwstate;
1409 1588
1410 DSSDBG("PLL init\n"); 1589 DSSDBG("PLL init\n");
1411 1590
1412#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL 1591 if (dsi->vdds_dsi_reg == NULL) {
1413 /*
1414 * HACK: this is just a quick hack to get the USE_DSI_PLL
1415 * option working. USE_DSI_PLL is itself a big hack, and
1416 * should be removed.
1417 */
1418 if (dsi.vdds_dsi_reg == NULL) {
1419 struct regulator *vdds_dsi; 1592 struct regulator *vdds_dsi;
1420 1593
1421 vdds_dsi = regulator_get(&dsi.pdev->dev, "vdds_dsi"); 1594 vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");
1422 1595
1423 if (IS_ERR(vdds_dsi)) { 1596 if (IS_ERR(vdds_dsi)) {
1424 DSSERR("can't get VDDS_DSI regulator\n"); 1597 DSSERR("can't get VDDS_DSI regulator\n");
1425 return PTR_ERR(vdds_dsi); 1598 return PTR_ERR(vdds_dsi);
1426 } 1599 }
1427 1600
1428 dsi.vdds_dsi_reg = vdds_dsi; 1601 dsi->vdds_dsi_reg = vdds_dsi;
1429 } 1602 }
1430#endif
1431 1603
1432 enable_clocks(1); 1604 enable_clocks(1);
1433 dsi_enable_pll_clock(1); 1605 dsi_enable_pll_clock(dsidev, 1);
1606 /*
1607 * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
1608 */
1609 dsi_enable_scp_clk(dsidev);
1434 1610
1435 r = regulator_enable(dsi.vdds_dsi_reg); 1611 if (!dsi->vdds_dsi_enabled) {
1436 if (r) 1612 r = regulator_enable(dsi->vdds_dsi_reg);
1437 goto err0; 1613 if (r)
1614 goto err0;
1615 dsi->vdds_dsi_enabled = true;
1616 }
1438 1617
1439 /* XXX PLL does not come out of reset without this... */ 1618 /* XXX PLL does not come out of reset without this... */
1440 dispc_pck_free_enable(1); 1619 dispc_pck_free_enable(1);
1441 1620
1442 if (wait_for_bit_change(DSI_PLL_STATUS, 0, 1) != 1) { 1621 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 0, 1) != 1) {
1443 DSSERR("PLL not coming out of reset.\n"); 1622 DSSERR("PLL not coming out of reset.\n");
1444 r = -ENODEV; 1623 r = -ENODEV;
1445 dispc_pck_free_enable(0); 1624 dispc_pck_free_enable(0);
@@ -1459,7 +1638,7 @@ int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk,
1459 else 1638 else
1460 pwstate = DSI_PLL_POWER_OFF; 1639 pwstate = DSI_PLL_POWER_OFF;
1461 1640
1462 r = dsi_pll_power(pwstate); 1641 r = dsi_pll_power(dsidev, pwstate);
1463 1642
1464 if (r) 1643 if (r)
1465 goto err1; 1644 goto err1;
@@ -1468,42 +1647,53 @@ int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk,
1468 1647
1469 return 0; 1648 return 0;
1470err1: 1649err1:
1471 regulator_disable(dsi.vdds_dsi_reg); 1650 if (dsi->vdds_dsi_enabled) {
1651 regulator_disable(dsi->vdds_dsi_reg);
1652 dsi->vdds_dsi_enabled = false;
1653 }
1472err0: 1654err0:
1655 dsi_disable_scp_clk(dsidev);
1473 enable_clocks(0); 1656 enable_clocks(0);
1474 dsi_enable_pll_clock(0); 1657 dsi_enable_pll_clock(dsidev, 0);
1475 return r; 1658 return r;
1476} 1659}
1477 1660
1478void dsi_pll_uninit(void) 1661void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
1479{ 1662{
1663 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1664
1665 dsi->pll_locked = 0;
1666 dsi_pll_power(dsidev, DSI_PLL_POWER_OFF);
1667 if (disconnect_lanes) {
1668 WARN_ON(!dsi->vdds_dsi_enabled);
1669 regulator_disable(dsi->vdds_dsi_reg);
1670 dsi->vdds_dsi_enabled = false;
1671 }
1672
1673 dsi_disable_scp_clk(dsidev);
1480 enable_clocks(0); 1674 enable_clocks(0);
1481 dsi_enable_pll_clock(0); 1675 dsi_enable_pll_clock(dsidev, 0);
1482 1676
1483 dsi.pll_locked = 0;
1484 dsi_pll_power(DSI_PLL_POWER_OFF);
1485 regulator_disable(dsi.vdds_dsi_reg);
1486 DSSDBG("PLL uninit done\n"); 1677 DSSDBG("PLL uninit done\n");
1487} 1678}
1488 1679
1489void dsi_dump_clocks(struct seq_file *s) 1680static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1681 struct seq_file *s)
1490{ 1682{
1491 int clksel; 1683 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1492 struct dsi_clock_info *cinfo = &dsi.current_cinfo; 1684 struct dsi_clock_info *cinfo = &dsi->current_cinfo;
1493 enum dss_clk_source dispc_clk_src, dsi_clk_src; 1685 enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
1686 int dsi_module = dsi_get_dsidev_id(dsidev);
1494 1687
1495 dispc_clk_src = dss_get_dispc_clk_source(); 1688 dispc_clk_src = dss_get_dispc_clk_source();
1496 dsi_clk_src = dss_get_dsi_clk_source(); 1689 dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
1497 1690
1498 enable_clocks(1); 1691 enable_clocks(1);
1499 1692
1500 clksel = REG_GET(DSI_PLL_CONFIGURATION2, 11, 11); 1693 seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
1501
1502 seq_printf(s, "- DSI PLL -\n");
1503 1694
1504 seq_printf(s, "dsi pll source = %s\n", 1695 seq_printf(s, "dsi pll source = %s\n",
1505 clksel == 0 ? 1696 cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree");
1506 "dss_sys_clk" : "pclkfree");
1507 1697
1508 seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn); 1698 seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
1509 1699
@@ -1515,7 +1705,7 @@ void dsi_dump_clocks(struct seq_file *s)
1515 dss_feat_get_clk_source_name(dispc_clk_src), 1705 dss_feat_get_clk_source_name(dispc_clk_src),
1516 cinfo->dsi_pll_hsdiv_dispc_clk, 1706 cinfo->dsi_pll_hsdiv_dispc_clk,
1517 cinfo->regm_dispc, 1707 cinfo->regm_dispc,
1518 dispc_clk_src == DSS_CLK_SRC_FCK ? 1708 dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ?
1519 "off" : "on"); 1709 "off" : "on");
1520 1710
1521 seq_printf(s, "%s (%s)\t%-16luregm_dsi %u\t(%s)\n", 1711 seq_printf(s, "%s (%s)\t%-16luregm_dsi %u\t(%s)\n",
@@ -1523,45 +1713,55 @@ void dsi_dump_clocks(struct seq_file *s)
1523 dss_feat_get_clk_source_name(dsi_clk_src), 1713 dss_feat_get_clk_source_name(dsi_clk_src),
1524 cinfo->dsi_pll_hsdiv_dsi_clk, 1714 cinfo->dsi_pll_hsdiv_dsi_clk,
1525 cinfo->regm_dsi, 1715 cinfo->regm_dsi,
1526 dsi_clk_src == DSS_CLK_SRC_FCK ? 1716 dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ?
1527 "off" : "on"); 1717 "off" : "on");
1528 1718
1529 seq_printf(s, "- DSI -\n"); 1719 seq_printf(s, "- DSI%d -\n", dsi_module + 1);
1530 1720
1531 seq_printf(s, "dsi fclk source = %s (%s)\n", 1721 seq_printf(s, "dsi fclk source = %s (%s)\n",
1532 dss_get_generic_clk_source_name(dsi_clk_src), 1722 dss_get_generic_clk_source_name(dsi_clk_src),
1533 dss_feat_get_clk_source_name(dsi_clk_src)); 1723 dss_feat_get_clk_source_name(dsi_clk_src));
1534 1724
1535 seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate()); 1725 seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));
1536 1726
1537 seq_printf(s, "DDR_CLK\t\t%lu\n", 1727 seq_printf(s, "DDR_CLK\t\t%lu\n",
1538 cinfo->clkin4ddr / 4); 1728 cinfo->clkin4ddr / 4);
1539 1729
1540 seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs()); 1730 seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev));
1541 1731
1542 seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk); 1732 seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk);
1543 1733
1544 seq_printf(s, "VP_CLK\t\t%lu\n"
1545 "VP_PCLK\t\t%lu\n",
1546 dispc_lclk_rate(OMAP_DSS_CHANNEL_LCD),
1547 dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD));
1548
1549 enable_clocks(0); 1734 enable_clocks(0);
1550} 1735}
1551 1736
1737void dsi_dump_clocks(struct seq_file *s)
1738{
1739 struct platform_device *dsidev;
1740 int i;
1741
1742 for (i = 0; i < MAX_NUM_DSI; i++) {
1743 dsidev = dsi_get_dsidev_from_id(i);
1744 if (dsidev)
1745 dsi_dump_dsidev_clocks(dsidev, s);
1746 }
1747}
1748
1552#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 1749#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
1553void dsi_dump_irqs(struct seq_file *s) 1750static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
1751 struct seq_file *s)
1554{ 1752{
1753 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1555 unsigned long flags; 1754 unsigned long flags;
1556 struct dsi_irq_stats stats; 1755 struct dsi_irq_stats stats;
1756 int dsi_module = dsi_get_dsidev_id(dsidev);
1557 1757
1558 spin_lock_irqsave(&dsi.irq_stats_lock, flags); 1758 spin_lock_irqsave(&dsi->irq_stats_lock, flags);
1559 1759
1560 stats = dsi.irq_stats; 1760 stats = dsi->irq_stats;
1561 memset(&dsi.irq_stats, 0, sizeof(dsi.irq_stats)); 1761 memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
1562 dsi.irq_stats.last_reset = jiffies; 1762 dsi->irq_stats.last_reset = jiffies;
1563 1763
1564 spin_unlock_irqrestore(&dsi.irq_stats_lock, flags); 1764 spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);
1565 1765
1566 seq_printf(s, "period %u ms\n", 1766 seq_printf(s, "period %u ms\n",
1567 jiffies_to_msecs(jiffies - stats.last_reset)); 1767 jiffies_to_msecs(jiffies - stats.last_reset));
@@ -1570,7 +1770,7 @@ void dsi_dump_irqs(struct seq_file *s)
1570#define PIS(x) \ 1770#define PIS(x) \
1571 seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]); 1771 seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
1572 1772
1573 seq_printf(s, "-- DSI interrupts --\n"); 1773 seq_printf(s, "-- DSI%d interrupts --\n", dsi_module + 1);
1574 PIS(VC0); 1774 PIS(VC0);
1575 PIS(VC1); 1775 PIS(VC1);
1576 PIS(VC2); 1776 PIS(VC2);
@@ -1636,13 +1836,45 @@ void dsi_dump_irqs(struct seq_file *s)
1636 PIS(ULPSACTIVENOT_ALL1); 1836 PIS(ULPSACTIVENOT_ALL1);
1637#undef PIS 1837#undef PIS
1638} 1838}
1839
1840static void dsi1_dump_irqs(struct seq_file *s)
1841{
1842 struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
1843
1844 dsi_dump_dsidev_irqs(dsidev, s);
1845}
1846
1847static void dsi2_dump_irqs(struct seq_file *s)
1848{
1849 struct platform_device *dsidev = dsi_get_dsidev_from_id(1);
1850
1851 dsi_dump_dsidev_irqs(dsidev, s);
1852}
1853
1854void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
1855 const struct file_operations *debug_fops)
1856{
1857 struct platform_device *dsidev;
1858
1859 dsidev = dsi_get_dsidev_from_id(0);
1860 if (dsidev)
1861 debugfs_create_file("dsi1_irqs", S_IRUGO, debugfs_dir,
1862 &dsi1_dump_irqs, debug_fops);
1863
1864 dsidev = dsi_get_dsidev_from_id(1);
1865 if (dsidev)
1866 debugfs_create_file("dsi2_irqs", S_IRUGO, debugfs_dir,
1867 &dsi2_dump_irqs, debug_fops);
1868}
1639#endif 1869#endif
1640 1870
1641void dsi_dump_regs(struct seq_file *s) 1871static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
1872 struct seq_file *s)
1642{ 1873{
1643#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(r)) 1874#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))
1644 1875
1645 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); 1876 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
1877 dsi_enable_scp_clk(dsidev);
1646 1878
1647 DUMPREG(DSI_REVISION); 1879 DUMPREG(DSI_REVISION);
1648 DUMPREG(DSI_SYSCONFIG); 1880 DUMPREG(DSI_SYSCONFIG);
@@ -1714,25 +1946,57 @@ void dsi_dump_regs(struct seq_file *s)
1714 DUMPREG(DSI_PLL_CONFIGURATION1); 1946 DUMPREG(DSI_PLL_CONFIGURATION1);
1715 DUMPREG(DSI_PLL_CONFIGURATION2); 1947 DUMPREG(DSI_PLL_CONFIGURATION2);
1716 1948
1949 dsi_disable_scp_clk(dsidev);
1717 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 1950 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
1718#undef DUMPREG 1951#undef DUMPREG
1719} 1952}
1720 1953
1721enum dsi_complexio_power_state { 1954static void dsi1_dump_regs(struct seq_file *s)
1955{
1956 struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
1957
1958 dsi_dump_dsidev_regs(dsidev, s);
1959}
1960
1961static void dsi2_dump_regs(struct seq_file *s)
1962{
1963 struct platform_device *dsidev = dsi_get_dsidev_from_id(1);
1964
1965 dsi_dump_dsidev_regs(dsidev, s);
1966}
1967
1968void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
1969 const struct file_operations *debug_fops)
1970{
1971 struct platform_device *dsidev;
1972
1973 dsidev = dsi_get_dsidev_from_id(0);
1974 if (dsidev)
1975 debugfs_create_file("dsi1_regs", S_IRUGO, debugfs_dir,
1976 &dsi1_dump_regs, debug_fops);
1977
1978 dsidev = dsi_get_dsidev_from_id(1);
1979 if (dsidev)
1980 debugfs_create_file("dsi2_regs", S_IRUGO, debugfs_dir,
1981 &dsi2_dump_regs, debug_fops);
1982}
1983enum dsi_cio_power_state {
1722 DSI_COMPLEXIO_POWER_OFF = 0x0, 1984 DSI_COMPLEXIO_POWER_OFF = 0x0,
1723 DSI_COMPLEXIO_POWER_ON = 0x1, 1985 DSI_COMPLEXIO_POWER_ON = 0x1,
1724 DSI_COMPLEXIO_POWER_ULPS = 0x2, 1986 DSI_COMPLEXIO_POWER_ULPS = 0x2,
1725}; 1987};
1726 1988
1727static int dsi_complexio_power(enum dsi_complexio_power_state state) 1989static int dsi_cio_power(struct platform_device *dsidev,
1990 enum dsi_cio_power_state state)
1728{ 1991{
1729 int t = 0; 1992 int t = 0;
1730 1993
1731 /* PWR_CMD */ 1994 /* PWR_CMD */
1732 REG_FLD_MOD(DSI_COMPLEXIO_CFG1, state, 28, 27); 1995 REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG1, state, 28, 27);
1733 1996
1734 /* PWR_STATUS */ 1997 /* PWR_STATUS */
1735 while (FLD_GET(dsi_read_reg(DSI_COMPLEXIO_CFG1), 26, 25) != state) { 1998 while (FLD_GET(dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1),
1999 26, 25) != state) {
1736 if (++t > 1000) { 2000 if (++t > 1000) {
1737 DSSERR("failed to set complexio power state to " 2001 DSSERR("failed to set complexio power state to "
1738 "%d\n", state); 2002 "%d\n", state);
@@ -1744,9 +2008,70 @@ static int dsi_complexio_power(enum dsi_complexio_power_state state)
1744 return 0; 2008 return 0;
1745} 2009}
1746 2010
1747static void dsi_complexio_config(struct omap_dss_device *dssdev) 2011/* Number of data lanes present on DSI interface */
2012static inline int dsi_get_num_data_lanes(struct platform_device *dsidev)
1748{ 2013{
2014 /* DSI on OMAP3 doesn't have register DSI_GNQ, set number
2015 * of data lanes as 2 by default */
2016 if (dss_has_feature(FEAT_DSI_GNQ))
2017 return REG_GET(dsidev, DSI_GNQ, 11, 9); /* NB_DATA_LANES */
2018 else
2019 return 2;
2020}
2021
2022/* Number of data lanes used by the dss device */
2023static inline int dsi_get_num_data_lanes_dssdev(struct omap_dss_device *dssdev)
2024{
2025 int num_data_lanes = 0;
2026
2027 if (dssdev->phy.dsi.data1_lane != 0)
2028 num_data_lanes++;
2029 if (dssdev->phy.dsi.data2_lane != 0)
2030 num_data_lanes++;
2031 if (dssdev->phy.dsi.data3_lane != 0)
2032 num_data_lanes++;
2033 if (dssdev->phy.dsi.data4_lane != 0)
2034 num_data_lanes++;
2035
2036 return num_data_lanes;
2037}
2038
2039static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
2040{
2041 int val;
2042
2043 /* line buffer on OMAP3 is 1024 x 24bits */
2044 /* XXX: for some reason using full buffer size causes
2045 * considerable TX slowdown with update sizes that fill the
2046 * whole buffer */
2047 if (!dss_has_feature(FEAT_DSI_GNQ))
2048 return 1023 * 3;
2049
2050 val = REG_GET(dsidev, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */
2051
2052 switch (val) {
2053 case 1:
2054 return 512 * 3; /* 512x24 bits */
2055 case 2:
2056 return 682 * 3; /* 682x24 bits */
2057 case 3:
2058 return 853 * 3; /* 853x24 bits */
2059 case 4:
2060 return 1024 * 3; /* 1024x24 bits */
2061 case 5:
2062 return 1194 * 3; /* 1194x24 bits */
2063 case 6:
2064 return 1365 * 3; /* 1365x24 bits */
2065 default:
2066 BUG();
2067 }
2068}
2069
2070static void dsi_set_lane_config(struct omap_dss_device *dssdev)
2071{
2072 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
1749 u32 r; 2073 u32 r;
2074 int num_data_lanes_dssdev = dsi_get_num_data_lanes_dssdev(dssdev);
1750 2075
1751 int clk_lane = dssdev->phy.dsi.clk_lane; 2076 int clk_lane = dssdev->phy.dsi.clk_lane;
1752 int data1_lane = dssdev->phy.dsi.data1_lane; 2077 int data1_lane = dssdev->phy.dsi.data1_lane;
@@ -1755,14 +2080,28 @@ static void dsi_complexio_config(struct omap_dss_device *dssdev)
1755 int data1_pol = dssdev->phy.dsi.data1_pol; 2080 int data1_pol = dssdev->phy.dsi.data1_pol;
1756 int data2_pol = dssdev->phy.dsi.data2_pol; 2081 int data2_pol = dssdev->phy.dsi.data2_pol;
1757 2082
1758 r = dsi_read_reg(DSI_COMPLEXIO_CFG1); 2083 r = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
1759 r = FLD_MOD(r, clk_lane, 2, 0); 2084 r = FLD_MOD(r, clk_lane, 2, 0);
1760 r = FLD_MOD(r, clk_pol, 3, 3); 2085 r = FLD_MOD(r, clk_pol, 3, 3);
1761 r = FLD_MOD(r, data1_lane, 6, 4); 2086 r = FLD_MOD(r, data1_lane, 6, 4);
1762 r = FLD_MOD(r, data1_pol, 7, 7); 2087 r = FLD_MOD(r, data1_pol, 7, 7);
1763 r = FLD_MOD(r, data2_lane, 10, 8); 2088 r = FLD_MOD(r, data2_lane, 10, 8);
1764 r = FLD_MOD(r, data2_pol, 11, 11); 2089 r = FLD_MOD(r, data2_pol, 11, 11);
1765 dsi_write_reg(DSI_COMPLEXIO_CFG1, r); 2090 if (num_data_lanes_dssdev > 2) {
2091 int data3_lane = dssdev->phy.dsi.data3_lane;
2092 int data3_pol = dssdev->phy.dsi.data3_pol;
2093
2094 r = FLD_MOD(r, data3_lane, 14, 12);
2095 r = FLD_MOD(r, data3_pol, 15, 15);
2096 }
2097 if (num_data_lanes_dssdev > 3) {
2098 int data4_lane = dssdev->phy.dsi.data4_lane;
2099 int data4_pol = dssdev->phy.dsi.data4_pol;
2100
2101 r = FLD_MOD(r, data4_lane, 18, 16);
2102 r = FLD_MOD(r, data4_pol, 19, 19);
2103 }
2104 dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r);
1766 2105
1767 /* The configuration of the DSI complex I/O (number of data lanes, 2106 /* The configuration of the DSI complex I/O (number of data lanes,
1768 position, differential order) should not be changed while 2107 position, differential order) should not be changed while
@@ -1776,27 +2115,31 @@ static void dsi_complexio_config(struct omap_dss_device *dssdev)
1776 DSI complex I/O configuration is unknown. */ 2115 DSI complex I/O configuration is unknown. */
1777 2116
1778 /* 2117 /*
1779 REG_FLD_MOD(DSI_CTRL, 1, 0, 0); 2118 REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0);
1780 REG_FLD_MOD(DSI_CTRL, 0, 0, 0); 2119 REG_FLD_MOD(dsidev, DSI_CTRL, 0, 0, 0);
1781 REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20); 2120 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20);
1782 REG_FLD_MOD(DSI_CTRL, 1, 0, 0); 2121 REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0);
1783 */ 2122 */
1784} 2123}
1785 2124
1786static inline unsigned ns2ddr(unsigned ns) 2125static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns)
1787{ 2126{
2127 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2128
1788 /* convert time in ns to ddr ticks, rounding up */ 2129 /* convert time in ns to ddr ticks, rounding up */
1789 unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4; 2130 unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
1790 return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000; 2131 return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
1791} 2132}
1792 2133
1793static inline unsigned ddr2ns(unsigned ddr) 2134static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr)
1794{ 2135{
1795 unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4; 2136 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2137
2138 unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
1796 return ddr * 1000 * 1000 / (ddr_clk / 1000); 2139 return ddr * 1000 * 1000 / (ddr_clk / 1000);
1797} 2140}
1798 2141
1799static void dsi_complexio_timings(void) 2142static void dsi_cio_timings(struct platform_device *dsidev)
1800{ 2143{
1801 u32 r; 2144 u32 r;
1802 u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit; 2145 u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
@@ -1808,139 +2151,323 @@ static void dsi_complexio_timings(void)
1808 /* 1 * DDR_CLK = 2 * UI */ 2151 /* 1 * DDR_CLK = 2 * UI */
1809 2152
1810 /* min 40ns + 4*UI max 85ns + 6*UI */ 2153 /* min 40ns + 4*UI max 85ns + 6*UI */
1811 ths_prepare = ns2ddr(70) + 2; 2154 ths_prepare = ns2ddr(dsidev, 70) + 2;
1812 2155
1813 /* min 145ns + 10*UI */ 2156 /* min 145ns + 10*UI */
1814 ths_prepare_ths_zero = ns2ddr(175) + 2; 2157 ths_prepare_ths_zero = ns2ddr(dsidev, 175) + 2;
1815 2158
1816 /* min max(8*UI, 60ns+4*UI) */ 2159 /* min max(8*UI, 60ns+4*UI) */
1817 ths_trail = ns2ddr(60) + 5; 2160 ths_trail = ns2ddr(dsidev, 60) + 5;
1818 2161
1819 /* min 100ns */ 2162 /* min 100ns */
1820 ths_exit = ns2ddr(145); 2163 ths_exit = ns2ddr(dsidev, 145);
1821 2164
1822 /* tlpx min 50n */ 2165 /* tlpx min 50n */
1823 tlpx_half = ns2ddr(25); 2166 tlpx_half = ns2ddr(dsidev, 25);
1824 2167
1825 /* min 60ns */ 2168 /* min 60ns */
1826 tclk_trail = ns2ddr(60) + 2; 2169 tclk_trail = ns2ddr(dsidev, 60) + 2;
1827 2170
1828 /* min 38ns, max 95ns */ 2171 /* min 38ns, max 95ns */
1829 tclk_prepare = ns2ddr(65); 2172 tclk_prepare = ns2ddr(dsidev, 65);
1830 2173
1831 /* min tclk-prepare + tclk-zero = 300ns */ 2174 /* min tclk-prepare + tclk-zero = 300ns */
1832 tclk_zero = ns2ddr(260); 2175 tclk_zero = ns2ddr(dsidev, 260);
1833 2176
1834 DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n", 2177 DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
1835 ths_prepare, ddr2ns(ths_prepare), 2178 ths_prepare, ddr2ns(dsidev, ths_prepare),
1836 ths_prepare_ths_zero, ddr2ns(ths_prepare_ths_zero)); 2179 ths_prepare_ths_zero, ddr2ns(dsidev, ths_prepare_ths_zero));
1837 DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n", 2180 DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
1838 ths_trail, ddr2ns(ths_trail), 2181 ths_trail, ddr2ns(dsidev, ths_trail),
1839 ths_exit, ddr2ns(ths_exit)); 2182 ths_exit, ddr2ns(dsidev, ths_exit));
1840 2183
1841 DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), " 2184 DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
1842 "tclk_zero %u (%uns)\n", 2185 "tclk_zero %u (%uns)\n",
1843 tlpx_half, ddr2ns(tlpx_half), 2186 tlpx_half, ddr2ns(dsidev, tlpx_half),
1844 tclk_trail, ddr2ns(tclk_trail), 2187 tclk_trail, ddr2ns(dsidev, tclk_trail),
1845 tclk_zero, ddr2ns(tclk_zero)); 2188 tclk_zero, ddr2ns(dsidev, tclk_zero));
1846 DSSDBG("tclk_prepare %u (%uns)\n", 2189 DSSDBG("tclk_prepare %u (%uns)\n",
1847 tclk_prepare, ddr2ns(tclk_prepare)); 2190 tclk_prepare, ddr2ns(dsidev, tclk_prepare));
1848 2191
1849 /* program timings */ 2192 /* program timings */
1850 2193
1851 r = dsi_read_reg(DSI_DSIPHY_CFG0); 2194 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
1852 r = FLD_MOD(r, ths_prepare, 31, 24); 2195 r = FLD_MOD(r, ths_prepare, 31, 24);
1853 r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16); 2196 r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
1854 r = FLD_MOD(r, ths_trail, 15, 8); 2197 r = FLD_MOD(r, ths_trail, 15, 8);
1855 r = FLD_MOD(r, ths_exit, 7, 0); 2198 r = FLD_MOD(r, ths_exit, 7, 0);
1856 dsi_write_reg(DSI_DSIPHY_CFG0, r); 2199 dsi_write_reg(dsidev, DSI_DSIPHY_CFG0, r);
1857 2200
1858 r = dsi_read_reg(DSI_DSIPHY_CFG1); 2201 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
1859 r = FLD_MOD(r, tlpx_half, 22, 16); 2202 r = FLD_MOD(r, tlpx_half, 22, 16);
1860 r = FLD_MOD(r, tclk_trail, 15, 8); 2203 r = FLD_MOD(r, tclk_trail, 15, 8);
1861 r = FLD_MOD(r, tclk_zero, 7, 0); 2204 r = FLD_MOD(r, tclk_zero, 7, 0);
1862 dsi_write_reg(DSI_DSIPHY_CFG1, r); 2205 dsi_write_reg(dsidev, DSI_DSIPHY_CFG1, r);
1863 2206
1864 r = dsi_read_reg(DSI_DSIPHY_CFG2); 2207 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
1865 r = FLD_MOD(r, tclk_prepare, 7, 0); 2208 r = FLD_MOD(r, tclk_prepare, 7, 0);
1866 dsi_write_reg(DSI_DSIPHY_CFG2, r); 2209 dsi_write_reg(dsidev, DSI_DSIPHY_CFG2, r);
1867} 2210}
1868 2211
2212static void dsi_cio_enable_lane_override(struct omap_dss_device *dssdev,
2213 enum dsi_lane lanes)
2214{
2215 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2216 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2217 int clk_lane = dssdev->phy.dsi.clk_lane;
2218 int data1_lane = dssdev->phy.dsi.data1_lane;
2219 int data2_lane = dssdev->phy.dsi.data2_lane;
2220 int data3_lane = dssdev->phy.dsi.data3_lane;
2221 int data4_lane = dssdev->phy.dsi.data4_lane;
2222 int clk_pol = dssdev->phy.dsi.clk_pol;
2223 int data1_pol = dssdev->phy.dsi.data1_pol;
2224 int data2_pol = dssdev->phy.dsi.data2_pol;
2225 int data3_pol = dssdev->phy.dsi.data3_pol;
2226 int data4_pol = dssdev->phy.dsi.data4_pol;
2227
2228 u32 l = 0;
2229 u8 lptxscp_start = dsi->num_data_lanes == 2 ? 22 : 26;
2230
2231 if (lanes & DSI_CLK_P)
2232 l |= 1 << ((clk_lane - 1) * 2 + (clk_pol ? 0 : 1));
2233 if (lanes & DSI_CLK_N)
2234 l |= 1 << ((clk_lane - 1) * 2 + (clk_pol ? 1 : 0));
2235
2236 if (lanes & DSI_DATA1_P)
2237 l |= 1 << ((data1_lane - 1) * 2 + (data1_pol ? 0 : 1));
2238 if (lanes & DSI_DATA1_N)
2239 l |= 1 << ((data1_lane - 1) * 2 + (data1_pol ? 1 : 0));
2240
2241 if (lanes & DSI_DATA2_P)
2242 l |= 1 << ((data2_lane - 1) * 2 + (data2_pol ? 0 : 1));
2243 if (lanes & DSI_DATA2_N)
2244 l |= 1 << ((data2_lane - 1) * 2 + (data2_pol ? 1 : 0));
2245
2246 if (lanes & DSI_DATA3_P)
2247 l |= 1 << ((data3_lane - 1) * 2 + (data3_pol ? 0 : 1));
2248 if (lanes & DSI_DATA3_N)
2249 l |= 1 << ((data3_lane - 1) * 2 + (data3_pol ? 1 : 0));
2250
2251 if (lanes & DSI_DATA4_P)
2252 l |= 1 << ((data4_lane - 1) * 2 + (data4_pol ? 0 : 1));
2253 if (lanes & DSI_DATA4_N)
2254 l |= 1 << ((data4_lane - 1) * 2 + (data4_pol ? 1 : 0));
2255 /*
2256 * Bits in REGLPTXSCPDAT4TO0DXDY:
2257 * 17: DY0 18: DX0
2258 * 19: DY1 20: DX1
2259 * 21: DY2 22: DX2
2260 * 23: DY3 24: DX3
2261 * 25: DY4 26: DX4
2262 */
2263
2264 /* Set the lane override configuration */
2265
2266 /* REGLPTXSCPDAT4TO0DXDY */
2267 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);
1869 2268
1870static int dsi_complexio_init(struct omap_dss_device *dssdev) 2269 /* Enable lane override */
2270
2271 /* ENLPTXSCPDAT */
2272 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 1, 27, 27);
2273}
2274
2275static void dsi_cio_disable_lane_override(struct platform_device *dsidev)
1871{ 2276{
1872 int r = 0; 2277 /* Disable lane override */
2278 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
2279 /* Reset the lane override configuration */
2280 /* REGLPTXSCPDAT4TO0DXDY */
2281 REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 22, 17);
2282}
2283
2284static int dsi_cio_wait_tx_clk_esc_reset(struct omap_dss_device *dssdev)
2285{
2286 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2287 int t;
2288 int bits[3];
2289 bool in_use[3];
2290
2291 if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
2292 bits[0] = 28;
2293 bits[1] = 27;
2294 bits[2] = 26;
2295 } else {
2296 bits[0] = 24;
2297 bits[1] = 25;
2298 bits[2] = 26;
2299 }
2300
2301 in_use[0] = false;
2302 in_use[1] = false;
2303 in_use[2] = false;
2304
2305 if (dssdev->phy.dsi.clk_lane != 0)
2306 in_use[dssdev->phy.dsi.clk_lane - 1] = true;
2307 if (dssdev->phy.dsi.data1_lane != 0)
2308 in_use[dssdev->phy.dsi.data1_lane - 1] = true;
2309 if (dssdev->phy.dsi.data2_lane != 0)
2310 in_use[dssdev->phy.dsi.data2_lane - 1] = true;
2311
2312 t = 100000;
2313 while (true) {
2314 u32 l;
2315 int i;
2316 int ok;
2317
2318 l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
2319
2320 ok = 0;
2321 for (i = 0; i < 3; ++i) {
2322 if (!in_use[i] || (l & (1 << bits[i])))
2323 ok++;
2324 }
2325
2326 if (ok == 3)
2327 break;
2328
2329 if (--t == 0) {
2330 for (i = 0; i < 3; ++i) {
2331 if (!in_use[i] || (l & (1 << bits[i])))
2332 continue;
2333
2334 DSSERR("CIO TXCLKESC%d domain not coming " \
2335 "out of reset\n", i);
2336 }
2337 return -EIO;
2338 }
2339 }
2340
2341 return 0;
2342}
2343
2344static int dsi_cio_init(struct omap_dss_device *dssdev)
2345{
2346 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2347 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2348 int r;
2349 int num_data_lanes_dssdev = dsi_get_num_data_lanes_dssdev(dssdev);
2350 u32 l;
1873 2351
1874 DSSDBG("dsi_complexio_init\n"); 2352 DSSDBGF();
1875 2353
1876 /* CIO_CLK_ICG, enable L3 clk to CIO */ 2354 if (dsi->dsi_mux_pads)
1877 REG_FLD_MOD(DSI_CLK_CTRL, 1, 14, 14); 2355 dsi->dsi_mux_pads(true);
2356
2357 dsi_enable_scp_clk(dsidev);
1878 2358
1879 /* A dummy read using the SCP interface to any DSIPHY register is 2359 /* A dummy read using the SCP interface to any DSIPHY register is
1880 * required after DSIPHY reset to complete the reset of the DSI complex 2360 * required after DSIPHY reset to complete the reset of the DSI complex
1881 * I/O. */ 2361 * I/O. */
1882 dsi_read_reg(DSI_DSIPHY_CFG5); 2362 dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
1883 2363
1884 if (wait_for_bit_change(DSI_DSIPHY_CFG5, 30, 1) != 1) { 2364 if (wait_for_bit_change(dsidev, DSI_DSIPHY_CFG5, 30, 1) != 1) {
1885 DSSERR("ComplexIO PHY not coming out of reset.\n"); 2365 DSSERR("CIO SCP Clock domain not coming out of reset.\n");
1886 r = -ENODEV; 2366 r = -EIO;
1887 goto err; 2367 goto err_scp_clk_dom;
1888 } 2368 }
1889 2369
1890 dsi_complexio_config(dssdev); 2370 dsi_set_lane_config(dssdev);
2371
2372 /* set TX STOP MODE timer to maximum for this operation */
2373 l = dsi_read_reg(dsidev, DSI_TIMING1);
2374 l = FLD_MOD(l, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
2375 l = FLD_MOD(l, 1, 14, 14); /* STOP_STATE_X16_IO */
2376 l = FLD_MOD(l, 1, 13, 13); /* STOP_STATE_X4_IO */
2377 l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */
2378 dsi_write_reg(dsidev, DSI_TIMING1, l);
1891 2379
1892 r = dsi_complexio_power(DSI_COMPLEXIO_POWER_ON); 2380 if (dsi->ulps_enabled) {
2381 u32 lane_mask = DSI_CLK_P | DSI_DATA1_P | DSI_DATA2_P;
1893 2382
2383 DSSDBG("manual ulps exit\n");
2384
2385 /* ULPS is exited by Mark-1 state for 1ms, followed by
2386 * stop state. DSS HW cannot do this via the normal
2387 * ULPS exit sequence, as after reset the DSS HW thinks
2388 * that we are not in ULPS mode, and refuses to send the
2389 * sequence. So we need to send the ULPS exit sequence
2390 * manually.
2391 */
2392
2393 if (num_data_lanes_dssdev > 2)
2394 lane_mask |= DSI_DATA3_P;
2395
2396 if (num_data_lanes_dssdev > 3)
2397 lane_mask |= DSI_DATA4_P;
2398
2399 dsi_cio_enable_lane_override(dssdev, lane_mask);
2400 }
2401
2402 r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON);
1894 if (r) 2403 if (r)
1895 goto err; 2404 goto err_cio_pwr;
1896 2405
1897 if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 29, 1) != 1) { 2406 if (wait_for_bit_change(dsidev, DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
1898 DSSERR("ComplexIO not coming out of reset.\n"); 2407 DSSERR("CIO PWR clock domain not coming out of reset.\n");
1899 r = -ENODEV; 2408 r = -ENODEV;
1900 goto err; 2409 goto err_cio_pwr_dom;
1901 } 2410 }
1902 2411
1903 if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 21, 1) != 1) { 2412 dsi_if_enable(dsidev, true);
1904 DSSERR("ComplexIO LDO power down.\n"); 2413 dsi_if_enable(dsidev, false);
1905 r = -ENODEV; 2414 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
1906 goto err; 2415
2416 r = dsi_cio_wait_tx_clk_esc_reset(dssdev);
2417 if (r)
2418 goto err_tx_clk_esc_rst;
2419
2420 if (dsi->ulps_enabled) {
2421 /* Keep Mark-1 state for 1ms (as per DSI spec) */
2422 ktime_t wait = ns_to_ktime(1000 * 1000);
2423 set_current_state(TASK_UNINTERRUPTIBLE);
2424 schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
2425
2426 /* Disable the override. The lanes should be set to Mark-11
2427 * state by the HW */
2428 dsi_cio_disable_lane_override(dsidev);
1907 } 2429 }
1908 2430
1909 dsi_complexio_timings(); 2431 /* FORCE_TX_STOP_MODE_IO */
2432 REG_FLD_MOD(dsidev, DSI_TIMING1, 0, 15, 15);
1910 2433
1911 /* 2434 dsi_cio_timings(dsidev);
1912 The configuration of the DSI complex I/O (number of data lanes, 2435
1913 position, differential order) should not be changed while 2436 dsi->ulps_enabled = false;
1914 DSS.DSI_CLK_CRTRL[20] LP_CLK_ENABLE bit is set to 1. For the
1915 hardware to recognize a new configuration of the complex I/O (done
1916 in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to follow
1917 this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1, next
1918 reset the DSS.DSI_CTRL[0] IF_EN to 0, then set DSS.DSI_CLK_CTRL[20]
1919 LP_CLK_ENABLE to 1, and finally, set again the DSS.DSI_CTRL[0] IF_EN
1920 bit to 1. If the sequence is not followed, the DSi complex I/O
1921 configuration is undetermined.
1922 */
1923 dsi_if_enable(1);
1924 dsi_if_enable(0);
1925 REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
1926 dsi_if_enable(1);
1927 dsi_if_enable(0);
1928 2437
1929 DSSDBG("CIO init done\n"); 2438 DSSDBG("CIO init done\n");
1930err: 2439
2440 return 0;
2441
2442err_tx_clk_esc_rst:
2443 REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */
2444err_cio_pwr_dom:
2445 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2446err_cio_pwr:
2447 if (dsi->ulps_enabled)
2448 dsi_cio_disable_lane_override(dsidev);
2449err_scp_clk_dom:
2450 dsi_disable_scp_clk(dsidev);
2451 if (dsi->dsi_mux_pads)
2452 dsi->dsi_mux_pads(false);
1931 return r; 2453 return r;
1932} 2454}
1933 2455
1934static void dsi_complexio_uninit(void) 2456static void dsi_cio_uninit(struct platform_device *dsidev)
1935{ 2457{
1936 dsi_complexio_power(DSI_COMPLEXIO_POWER_OFF); 2458 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2459
2460 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2461 dsi_disable_scp_clk(dsidev);
2462 if (dsi->dsi_mux_pads)
2463 dsi->dsi_mux_pads(false);
1937} 2464}
1938 2465
1939static int _dsi_wait_reset(void) 2466static int _dsi_wait_reset(struct platform_device *dsidev)
1940{ 2467{
1941 int t = 0; 2468 int t = 0;
1942 2469
1943 while (REG_GET(DSI_SYSSTATUS, 0, 0) == 0) { 2470 while (REG_GET(dsidev, DSI_SYSSTATUS, 0, 0) == 0) {
1944 if (++t > 5) { 2471 if (++t > 5) {
1945 DSSERR("soft reset failed\n"); 2472 DSSERR("soft reset failed\n");
1946 return -ENODEV; 2473 return -ENODEV;
@@ -1951,28 +2478,30 @@ static int _dsi_wait_reset(void)
1951 return 0; 2478 return 0;
1952} 2479}
1953 2480
1954static int _dsi_reset(void) 2481static int _dsi_reset(struct platform_device *dsidev)
1955{ 2482{
1956 /* Soft reset */ 2483 /* Soft reset */
1957 REG_FLD_MOD(DSI_SYSCONFIG, 1, 1, 1); 2484 REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 1, 1);
1958 return _dsi_wait_reset(); 2485 return _dsi_wait_reset(dsidev);
1959} 2486}
1960 2487
1961static void dsi_config_tx_fifo(enum fifo_size size1, enum fifo_size size2, 2488static void dsi_config_tx_fifo(struct platform_device *dsidev,
2489 enum fifo_size size1, enum fifo_size size2,
1962 enum fifo_size size3, enum fifo_size size4) 2490 enum fifo_size size3, enum fifo_size size4)
1963{ 2491{
2492 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1964 u32 r = 0; 2493 u32 r = 0;
1965 int add = 0; 2494 int add = 0;
1966 int i; 2495 int i;
1967 2496
1968 dsi.vc[0].fifo_size = size1; 2497 dsi->vc[0].fifo_size = size1;
1969 dsi.vc[1].fifo_size = size2; 2498 dsi->vc[1].fifo_size = size2;
1970 dsi.vc[2].fifo_size = size3; 2499 dsi->vc[2].fifo_size = size3;
1971 dsi.vc[3].fifo_size = size4; 2500 dsi->vc[3].fifo_size = size4;
1972 2501
1973 for (i = 0; i < 4; i++) { 2502 for (i = 0; i < 4; i++) {
1974 u8 v; 2503 u8 v;
1975 int size = dsi.vc[i].fifo_size; 2504 int size = dsi->vc[i].fifo_size;
1976 2505
1977 if (add + size > 4) { 2506 if (add + size > 4) {
1978 DSSERR("Illegal FIFO configuration\n"); 2507 DSSERR("Illegal FIFO configuration\n");
@@ -1985,24 +2514,26 @@ static void dsi_config_tx_fifo(enum fifo_size size1, enum fifo_size size2,
1985 add += size; 2514 add += size;
1986 } 2515 }
1987 2516
1988 dsi_write_reg(DSI_TX_FIFO_VC_SIZE, r); 2517 dsi_write_reg(dsidev, DSI_TX_FIFO_VC_SIZE, r);
1989} 2518}
1990 2519
1991static void dsi_config_rx_fifo(enum fifo_size size1, enum fifo_size size2, 2520static void dsi_config_rx_fifo(struct platform_device *dsidev,
2521 enum fifo_size size1, enum fifo_size size2,
1992 enum fifo_size size3, enum fifo_size size4) 2522 enum fifo_size size3, enum fifo_size size4)
1993{ 2523{
2524 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1994 u32 r = 0; 2525 u32 r = 0;
1995 int add = 0; 2526 int add = 0;
1996 int i; 2527 int i;
1997 2528
1998 dsi.vc[0].fifo_size = size1; 2529 dsi->vc[0].fifo_size = size1;
1999 dsi.vc[1].fifo_size = size2; 2530 dsi->vc[1].fifo_size = size2;
2000 dsi.vc[2].fifo_size = size3; 2531 dsi->vc[2].fifo_size = size3;
2001 dsi.vc[3].fifo_size = size4; 2532 dsi->vc[3].fifo_size = size4;
2002 2533
2003 for (i = 0; i < 4; i++) { 2534 for (i = 0; i < 4; i++) {
2004 u8 v; 2535 u8 v;
2005 int size = dsi.vc[i].fifo_size; 2536 int size = dsi->vc[i].fifo_size;
2006 2537
2007 if (add + size > 4) { 2538 if (add + size > 4) {
2008 DSSERR("Illegal FIFO configuration\n"); 2539 DSSERR("Illegal FIFO configuration\n");
@@ -2015,18 +2546,18 @@ static void dsi_config_rx_fifo(enum fifo_size size1, enum fifo_size size2,
2015 add += size; 2546 add += size;
2016 } 2547 }
2017 2548
2018 dsi_write_reg(DSI_RX_FIFO_VC_SIZE, r); 2549 dsi_write_reg(dsidev, DSI_RX_FIFO_VC_SIZE, r);
2019} 2550}
2020 2551
2021static int dsi_force_tx_stop_mode_io(void) 2552static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev)
2022{ 2553{
2023 u32 r; 2554 u32 r;
2024 2555
2025 r = dsi_read_reg(DSI_TIMING1); 2556 r = dsi_read_reg(dsidev, DSI_TIMING1);
2026 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ 2557 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
2027 dsi_write_reg(DSI_TIMING1, r); 2558 dsi_write_reg(dsidev, DSI_TIMING1, r);
2028 2559
2029 if (wait_for_bit_change(DSI_TIMING1, 15, 0) != 0) { 2560 if (wait_for_bit_change(dsidev, DSI_TIMING1, 15, 0) != 0) {
2030 DSSERR("TX_STOP bit not going down\n"); 2561 DSSERR("TX_STOP bit not going down\n");
2031 return -EIO; 2562 return -EIO;
2032 } 2563 }
@@ -2034,16 +2565,135 @@ static int dsi_force_tx_stop_mode_io(void)
2034 return 0; 2565 return 0;
2035} 2566}
2036 2567
2037static int dsi_vc_enable(int channel, bool enable) 2568static bool dsi_vc_is_enabled(struct platform_device *dsidev, int channel)
2569{
2570 return REG_GET(dsidev, DSI_VC_CTRL(channel), 0, 0);
2571}
2572
2573static void dsi_packet_sent_handler_vp(void *data, u32 mask)
2574{
2575 struct dsi_packet_sent_handler_data *vp_data =
2576 (struct dsi_packet_sent_handler_data *) data;
2577 struct dsi_data *dsi = dsi_get_dsidrv_data(vp_data->dsidev);
2578 const int channel = dsi->update_channel;
2579 u8 bit = dsi->te_enabled ? 30 : 31;
2580
2581 if (REG_GET(vp_data->dsidev, DSI_VC_TE(channel), bit, bit) == 0)
2582 complete(vp_data->completion);
2583}
2584
2585static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
2586{
2587 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2588 DECLARE_COMPLETION_ONSTACK(completion);
2589 struct dsi_packet_sent_handler_data vp_data = { dsidev, &completion };
2590 int r = 0;
2591 u8 bit;
2592
2593 bit = dsi->te_enabled ? 30 : 31;
2594
2595 r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2596 &vp_data, DSI_VC_IRQ_PACKET_SENT);
2597 if (r)
2598 goto err0;
2599
2600 /* Wait for completion only if TE_EN/TE_START is still set */
2601 if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit)) {
2602 if (wait_for_completion_timeout(&completion,
2603 msecs_to_jiffies(10)) == 0) {
2604 DSSERR("Failed to complete previous frame transfer\n");
2605 r = -EIO;
2606 goto err1;
2607 }
2608 }
2609
2610 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2611 &vp_data, DSI_VC_IRQ_PACKET_SENT);
2612
2613 return 0;
2614err1:
2615 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2616 &vp_data, DSI_VC_IRQ_PACKET_SENT);
2617err0:
2618 return r;
2619}
2620
2621static void dsi_packet_sent_handler_l4(void *data, u32 mask)
2622{
2623 struct dsi_packet_sent_handler_data *l4_data =
2624 (struct dsi_packet_sent_handler_data *) data;
2625 struct dsi_data *dsi = dsi_get_dsidrv_data(l4_data->dsidev);
2626 const int channel = dsi->update_channel;
2627
2628 if (REG_GET(l4_data->dsidev, DSI_VC_CTRL(channel), 5, 5) == 0)
2629 complete(l4_data->completion);
2630}
2631
2632static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel)
2633{
2634 DECLARE_COMPLETION_ONSTACK(completion);
2635 struct dsi_packet_sent_handler_data l4_data = { dsidev, &completion };
2636 int r = 0;
2637
2638 r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2639 &l4_data, DSI_VC_IRQ_PACKET_SENT);
2640 if (r)
2641 goto err0;
2642
2643 /* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
2644 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5)) {
2645 if (wait_for_completion_timeout(&completion,
2646 msecs_to_jiffies(10)) == 0) {
2647 DSSERR("Failed to complete previous l4 transfer\n");
2648 r = -EIO;
2649 goto err1;
2650 }
2651 }
2652
2653 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2654 &l4_data, DSI_VC_IRQ_PACKET_SENT);
2655
2656 return 0;
2657err1:
2658 dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2659 &l4_data, DSI_VC_IRQ_PACKET_SENT);
2660err0:
2661 return r;
2662}
2663
2664static int dsi_sync_vc(struct platform_device *dsidev, int channel)
2665{
2666 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2667
2668 WARN_ON(!dsi_bus_is_locked(dsidev));
2669
2670 WARN_ON(in_interrupt());
2671
2672 if (!dsi_vc_is_enabled(dsidev, channel))
2673 return 0;
2674
2675 switch (dsi->vc[channel].mode) {
2676 case DSI_VC_MODE_VP:
2677 return dsi_sync_vc_vp(dsidev, channel);
2678 case DSI_VC_MODE_L4:
2679 return dsi_sync_vc_l4(dsidev, channel);
2680 default:
2681 BUG();
2682 }
2683}
2684
2685static int dsi_vc_enable(struct platform_device *dsidev, int channel,
2686 bool enable)
2038{ 2687{
2039 DSSDBG("dsi_vc_enable channel %d, enable %d\n", 2688 DSSDBG("dsi_vc_enable channel %d, enable %d\n",
2040 channel, enable); 2689 channel, enable);
2041 2690
2042 enable = enable ? 1 : 0; 2691 enable = enable ? 1 : 0;
2043 2692
2044 REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 0, 0); 2693 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 0, 0);
2045 2694
2046 if (wait_for_bit_change(DSI_VC_CTRL(channel), 0, enable) != enable) { 2695 if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel),
2696 0, enable) != enable) {
2047 DSSERR("Failed to set dsi_vc_enable to %d\n", enable); 2697 DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
2048 return -EIO; 2698 return -EIO;
2049 } 2699 }
@@ -2051,13 +2701,13 @@ static int dsi_vc_enable(int channel, bool enable)
2051 return 0; 2701 return 0;
2052} 2702}
2053 2703
2054static void dsi_vc_initial_config(int channel) 2704static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
2055{ 2705{
2056 u32 r; 2706 u32 r;
2057 2707
2058 DSSDBGF("%d", channel); 2708 DSSDBGF("%d", channel);
2059 2709
2060 r = dsi_read_reg(DSI_VC_CTRL(channel)); 2710 r = dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
2061 2711
2062 if (FLD_GET(r, 15, 15)) /* VC_BUSY */ 2712 if (FLD_GET(r, 15, 15)) /* VC_BUSY */
2063 DSSERR("VC(%d) busy when trying to configure it!\n", 2713 DSSERR("VC(%d) busy when trying to configure it!\n",
@@ -2070,85 +2720,107 @@ static void dsi_vc_initial_config(int channel)
2070 r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */ 2720 r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
2071 r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */ 2721 r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
2072 r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */ 2722 r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
2723 if (dss_has_feature(FEAT_DSI_VC_OCP_WIDTH))
2724 r = FLD_MOD(r, 3, 11, 10); /* OCP_WIDTH = 32 bit */
2073 2725
2074 r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */ 2726 r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
2075 r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */ 2727 r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
2076 2728
2077 dsi_write_reg(DSI_VC_CTRL(channel), r); 2729 dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r);
2078} 2730}
2079 2731
2080static int dsi_vc_config_l4(int channel) 2732static int dsi_vc_config_l4(struct platform_device *dsidev, int channel)
2081{ 2733{
2082 if (dsi.vc[channel].mode == DSI_VC_MODE_L4) 2734 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2735
2736 if (dsi->vc[channel].mode == DSI_VC_MODE_L4)
2083 return 0; 2737 return 0;
2084 2738
2085 DSSDBGF("%d", channel); 2739 DSSDBGF("%d", channel);
2086 2740
2087 dsi_vc_enable(channel, 0); 2741 dsi_sync_vc(dsidev, channel);
2742
2743 dsi_vc_enable(dsidev, channel, 0);
2088 2744
2089 /* VC_BUSY */ 2745 /* VC_BUSY */
2090 if (wait_for_bit_change(DSI_VC_CTRL(channel), 15, 0) != 0) { 2746 if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) {
2091 DSSERR("vc(%d) busy when trying to config for L4\n", channel); 2747 DSSERR("vc(%d) busy when trying to config for L4\n", channel);
2092 return -EIO; 2748 return -EIO;
2093 } 2749 }
2094 2750
2095 REG_FLD_MOD(DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */ 2751 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */
2752
2753 /* DCS_CMD_ENABLE */
2754 if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC))
2755 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 30, 30);
2096 2756
2097 dsi_vc_enable(channel, 1); 2757 dsi_vc_enable(dsidev, channel, 1);
2098 2758
2099 dsi.vc[channel].mode = DSI_VC_MODE_L4; 2759 dsi->vc[channel].mode = DSI_VC_MODE_L4;
2100 2760
2101 return 0; 2761 return 0;
2102} 2762}
2103 2763
2104static int dsi_vc_config_vp(int channel) 2764static int dsi_vc_config_vp(struct platform_device *dsidev, int channel)
2105{ 2765{
2106 if (dsi.vc[channel].mode == DSI_VC_MODE_VP) 2766 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2767
2768 if (dsi->vc[channel].mode == DSI_VC_MODE_VP)
2107 return 0; 2769 return 0;
2108 2770
2109 DSSDBGF("%d", channel); 2771 DSSDBGF("%d", channel);
2110 2772
2111 dsi_vc_enable(channel, 0); 2773 dsi_sync_vc(dsidev, channel);
2774
2775 dsi_vc_enable(dsidev, channel, 0);
2112 2776
2113 /* VC_BUSY */ 2777 /* VC_BUSY */
2114 if (wait_for_bit_change(DSI_VC_CTRL(channel), 15, 0) != 0) { 2778 if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) {
2115 DSSERR("vc(%d) busy when trying to config for VP\n", channel); 2779 DSSERR("vc(%d) busy when trying to config for VP\n", channel);
2116 return -EIO; 2780 return -EIO;
2117 } 2781 }
2118 2782
2119 REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 1, 1); /* SOURCE, 1 = video port */ 2783 /* SOURCE, 1 = video port */
2784 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 1, 1);
2785
2786 /* DCS_CMD_ENABLE */
2787 if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC))
2788 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 30, 30);
2120 2789
2121 dsi_vc_enable(channel, 1); 2790 dsi_vc_enable(dsidev, channel, 1);
2122 2791
2123 dsi.vc[channel].mode = DSI_VC_MODE_VP; 2792 dsi->vc[channel].mode = DSI_VC_MODE_VP;
2124 2793
2125 return 0; 2794 return 0;
2126} 2795}
2127 2796
2128 2797
2129void omapdss_dsi_vc_enable_hs(int channel, bool enable) 2798void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
2799 bool enable)
2130{ 2800{
2801 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2802
2131 DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable); 2803 DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
2132 2804
2133 WARN_ON(!dsi_bus_is_locked()); 2805 WARN_ON(!dsi_bus_is_locked(dsidev));
2134 2806
2135 dsi_vc_enable(channel, 0); 2807 dsi_vc_enable(dsidev, channel, 0);
2136 dsi_if_enable(0); 2808 dsi_if_enable(dsidev, 0);
2137 2809
2138 REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 9, 9); 2810 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 9, 9);
2139 2811
2140 dsi_vc_enable(channel, 1); 2812 dsi_vc_enable(dsidev, channel, 1);
2141 dsi_if_enable(1); 2813 dsi_if_enable(dsidev, 1);
2142 2814
2143 dsi_force_tx_stop_mode_io(); 2815 dsi_force_tx_stop_mode_io(dsidev);
2144} 2816}
2145EXPORT_SYMBOL(omapdss_dsi_vc_enable_hs); 2817EXPORT_SYMBOL(omapdss_dsi_vc_enable_hs);
2146 2818
2147static void dsi_vc_flush_long_data(int channel) 2819static void dsi_vc_flush_long_data(struct platform_device *dsidev, int channel)
2148{ 2820{
2149 while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { 2821 while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2150 u32 val; 2822 u32 val;
2151 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel)); 2823 val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
2152 DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n", 2824 DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
2153 (val >> 0) & 0xff, 2825 (val >> 0) & 0xff,
2154 (val >> 8) & 0xff, 2826 (val >> 8) & 0xff,
@@ -2194,13 +2866,14 @@ static void dsi_show_rx_ack_with_err(u16 err)
2194 DSSERR("\t\tDSI Protocol Violation\n"); 2866 DSSERR("\t\tDSI Protocol Violation\n");
2195} 2867}
2196 2868
2197static u16 dsi_vc_flush_receive_data(int channel) 2869static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
2870 int channel)
2198{ 2871{
2199 /* RX_FIFO_NOT_EMPTY */ 2872 /* RX_FIFO_NOT_EMPTY */
2200 while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { 2873 while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2201 u32 val; 2874 u32 val;
2202 u8 dt; 2875 u8 dt;
2203 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel)); 2876 val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
2204 DSSERR("\trawval %#08x\n", val); 2877 DSSERR("\trawval %#08x\n", val);
2205 dt = FLD_GET(val, 5, 0); 2878 dt = FLD_GET(val, 5, 0);
2206 if (dt == DSI_DT_RX_ACK_WITH_ERR) { 2879 if (dt == DSI_DT_RX_ACK_WITH_ERR) {
@@ -2215,7 +2888,7 @@ static u16 dsi_vc_flush_receive_data(int channel)
2215 } else if (dt == DSI_DT_RX_DCS_LONG_READ) { 2888 } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
2216 DSSERR("\tDCS long response, len %d\n", 2889 DSSERR("\tDCS long response, len %d\n",
2217 FLD_GET(val, 23, 8)); 2890 FLD_GET(val, 23, 8));
2218 dsi_vc_flush_long_data(channel); 2891 dsi_vc_flush_long_data(dsidev, channel);
2219 } else { 2892 } else {
2220 DSSERR("\tunknown datatype 0x%02x\n", dt); 2893 DSSERR("\tunknown datatype 0x%02x\n", dt);
2221 } 2894 }
@@ -2223,40 +2896,44 @@ static u16 dsi_vc_flush_receive_data(int channel)
2223 return 0; 2896 return 0;
2224} 2897}
2225 2898
2226static int dsi_vc_send_bta(int channel) 2899static int dsi_vc_send_bta(struct platform_device *dsidev, int channel)
2227{ 2900{
2228 if (dsi.debug_write || dsi.debug_read) 2901 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2902
2903 if (dsi->debug_write || dsi->debug_read)
2229 DSSDBG("dsi_vc_send_bta %d\n", channel); 2904 DSSDBG("dsi_vc_send_bta %d\n", channel);
2230 2905
2231 WARN_ON(!dsi_bus_is_locked()); 2906 WARN_ON(!dsi_bus_is_locked(dsidev));
2232 2907
2233 if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { /* RX_FIFO_NOT_EMPTY */ 2908 /* RX_FIFO_NOT_EMPTY */
2909 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2234 DSSERR("rx fifo not empty when sending BTA, dumping data:\n"); 2910 DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
2235 dsi_vc_flush_receive_data(channel); 2911 dsi_vc_flush_receive_data(dsidev, channel);
2236 } 2912 }
2237 2913
2238 REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */ 2914 REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
2239 2915
2240 return 0; 2916 return 0;
2241} 2917}
2242 2918
2243int dsi_vc_send_bta_sync(int channel) 2919int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
2244{ 2920{
2921 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2245 DECLARE_COMPLETION_ONSTACK(completion); 2922 DECLARE_COMPLETION_ONSTACK(completion);
2246 int r = 0; 2923 int r = 0;
2247 u32 err; 2924 u32 err;
2248 2925
2249 r = dsi_register_isr_vc(channel, dsi_completion_handler, 2926 r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler,
2250 &completion, DSI_VC_IRQ_BTA); 2927 &completion, DSI_VC_IRQ_BTA);
2251 if (r) 2928 if (r)
2252 goto err0; 2929 goto err0;
2253 2930
2254 r = dsi_register_isr(dsi_completion_handler, &completion, 2931 r = dsi_register_isr(dsidev, dsi_completion_handler, &completion,
2255 DSI_IRQ_ERROR_MASK); 2932 DSI_IRQ_ERROR_MASK);
2256 if (r) 2933 if (r)
2257 goto err1; 2934 goto err1;
2258 2935
2259 r = dsi_vc_send_bta(channel); 2936 r = dsi_vc_send_bta(dsidev, channel);
2260 if (r) 2937 if (r)
2261 goto err2; 2938 goto err2;
2262 2939
@@ -2267,41 +2944,42 @@ int dsi_vc_send_bta_sync(int channel)
2267 goto err2; 2944 goto err2;
2268 } 2945 }
2269 2946
2270 err = dsi_get_errors(); 2947 err = dsi_get_errors(dsidev);
2271 if (err) { 2948 if (err) {
2272 DSSERR("Error while sending BTA: %x\n", err); 2949 DSSERR("Error while sending BTA: %x\n", err);
2273 r = -EIO; 2950 r = -EIO;
2274 goto err2; 2951 goto err2;
2275 } 2952 }
2276err2: 2953err2:
2277 dsi_unregister_isr(dsi_completion_handler, &completion, 2954 dsi_unregister_isr(dsidev, dsi_completion_handler, &completion,
2278 DSI_IRQ_ERROR_MASK); 2955 DSI_IRQ_ERROR_MASK);
2279err1: 2956err1:
2280 dsi_unregister_isr_vc(channel, dsi_completion_handler, 2957 dsi_unregister_isr_vc(dsidev, channel, dsi_completion_handler,
2281 &completion, DSI_VC_IRQ_BTA); 2958 &completion, DSI_VC_IRQ_BTA);
2282err0: 2959err0:
2283 return r; 2960 return r;
2284} 2961}
2285EXPORT_SYMBOL(dsi_vc_send_bta_sync); 2962EXPORT_SYMBOL(dsi_vc_send_bta_sync);
2286 2963
2287static inline void dsi_vc_write_long_header(int channel, u8 data_type, 2964static inline void dsi_vc_write_long_header(struct platform_device *dsidev,
2288 u16 len, u8 ecc) 2965 int channel, u8 data_type, u16 len, u8 ecc)
2289{ 2966{
2967 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2290 u32 val; 2968 u32 val;
2291 u8 data_id; 2969 u8 data_id;
2292 2970
2293 WARN_ON(!dsi_bus_is_locked()); 2971 WARN_ON(!dsi_bus_is_locked(dsidev));
2294 2972
2295 data_id = data_type | dsi.vc[channel].vc_id << 6; 2973 data_id = data_type | dsi->vc[channel].vc_id << 6;
2296 2974
2297 val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) | 2975 val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
2298 FLD_VAL(ecc, 31, 24); 2976 FLD_VAL(ecc, 31, 24);
2299 2977
2300 dsi_write_reg(DSI_VC_LONG_PACKET_HEADER(channel), val); 2978 dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_HEADER(channel), val);
2301} 2979}
2302 2980
2303static inline void dsi_vc_write_long_payload(int channel, 2981static inline void dsi_vc_write_long_payload(struct platform_device *dsidev,
2304 u8 b1, u8 b2, u8 b3, u8 b4) 2982 int channel, u8 b1, u8 b2, u8 b3, u8 b4)
2305{ 2983{
2306 u32 val; 2984 u32 val;
2307 2985
@@ -2310,34 +2988,35 @@ static inline void dsi_vc_write_long_payload(int channel,
2310/* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n", 2988/* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
2311 b1, b2, b3, b4, val); */ 2989 b1, b2, b3, b4, val); */
2312 2990
2313 dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(channel), val); 2991 dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
2314} 2992}
2315 2993
2316static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len, 2994static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
2317 u8 ecc) 2995 u8 data_type, u8 *data, u16 len, u8 ecc)
2318{ 2996{
2319 /*u32 val; */ 2997 /*u32 val; */
2998 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2320 int i; 2999 int i;
2321 u8 *p; 3000 u8 *p;
2322 int r = 0; 3001 int r = 0;
2323 u8 b1, b2, b3, b4; 3002 u8 b1, b2, b3, b4;
2324 3003
2325 if (dsi.debug_write) 3004 if (dsi->debug_write)
2326 DSSDBG("dsi_vc_send_long, %d bytes\n", len); 3005 DSSDBG("dsi_vc_send_long, %d bytes\n", len);
2327 3006
2328 /* len + header */ 3007 /* len + header */
2329 if (dsi.vc[channel].fifo_size * 32 * 4 < len + 4) { 3008 if (dsi->vc[channel].fifo_size * 32 * 4 < len + 4) {
2330 DSSERR("unable to send long packet: packet too long.\n"); 3009 DSSERR("unable to send long packet: packet too long.\n");
2331 return -EINVAL; 3010 return -EINVAL;
2332 } 3011 }
2333 3012
2334 dsi_vc_config_l4(channel); 3013 dsi_vc_config_l4(dsidev, channel);
2335 3014
2336 dsi_vc_write_long_header(channel, data_type, len, ecc); 3015 dsi_vc_write_long_header(dsidev, channel, data_type, len, ecc);
2337 3016
2338 p = data; 3017 p = data;
2339 for (i = 0; i < len >> 2; i++) { 3018 for (i = 0; i < len >> 2; i++) {
2340 if (dsi.debug_write) 3019 if (dsi->debug_write)
2341 DSSDBG("\tsending full packet %d\n", i); 3020 DSSDBG("\tsending full packet %d\n", i);
2342 3021
2343 b1 = *p++; 3022 b1 = *p++;
@@ -2345,14 +3024,14 @@ static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len,
2345 b3 = *p++; 3024 b3 = *p++;
2346 b4 = *p++; 3025 b4 = *p++;
2347 3026
2348 dsi_vc_write_long_payload(channel, b1, b2, b3, b4); 3027 dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, b4);
2349 } 3028 }
2350 3029
2351 i = len % 4; 3030 i = len % 4;
2352 if (i) { 3031 if (i) {
2353 b1 = 0; b2 = 0; b3 = 0; 3032 b1 = 0; b2 = 0; b3 = 0;
2354 3033
2355 if (dsi.debug_write) 3034 if (dsi->debug_write)
2356 DSSDBG("\tsending remainder bytes %d\n", i); 3035 DSSDBG("\tsending remainder bytes %d\n", i);
2357 3036
2358 switch (i) { 3037 switch (i) {
@@ -2370,62 +3049,69 @@ static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len,
2370 break; 3049 break;
2371 } 3050 }
2372 3051
2373 dsi_vc_write_long_payload(channel, b1, b2, b3, 0); 3052 dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, 0);
2374 } 3053 }
2375 3054
2376 return r; 3055 return r;
2377} 3056}
2378 3057
2379static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc) 3058static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
3059 u8 data_type, u16 data, u8 ecc)
2380{ 3060{
3061 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2381 u32 r; 3062 u32 r;
2382 u8 data_id; 3063 u8 data_id;
2383 3064
2384 WARN_ON(!dsi_bus_is_locked()); 3065 WARN_ON(!dsi_bus_is_locked(dsidev));
2385 3066
2386 if (dsi.debug_write) 3067 if (dsi->debug_write)
2387 DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n", 3068 DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
2388 channel, 3069 channel,
2389 data_type, data & 0xff, (data >> 8) & 0xff); 3070 data_type, data & 0xff, (data >> 8) & 0xff);
2390 3071
2391 dsi_vc_config_l4(channel); 3072 dsi_vc_config_l4(dsidev, channel);
2392 3073
2393 if (FLD_GET(dsi_read_reg(DSI_VC_CTRL(channel)), 16, 16)) { 3074 if (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(channel)), 16, 16)) {
2394 DSSERR("ERROR FIFO FULL, aborting transfer\n"); 3075 DSSERR("ERROR FIFO FULL, aborting transfer\n");
2395 return -EINVAL; 3076 return -EINVAL;
2396 } 3077 }
2397 3078
2398 data_id = data_type | dsi.vc[channel].vc_id << 6; 3079 data_id = data_type | dsi->vc[channel].vc_id << 6;
2399 3080
2400 r = (data_id << 0) | (data << 8) | (ecc << 24); 3081 r = (data_id << 0) | (data << 8) | (ecc << 24);
2401 3082
2402 dsi_write_reg(DSI_VC_SHORT_PACKET_HEADER(channel), r); 3083 dsi_write_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel), r);
2403 3084
2404 return 0; 3085 return 0;
2405} 3086}
2406 3087
2407int dsi_vc_send_null(int channel) 3088int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel)
2408{ 3089{
3090 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2409 u8 nullpkg[] = {0, 0, 0, 0}; 3091 u8 nullpkg[] = {0, 0, 0, 0};
2410 return dsi_vc_send_long(channel, DSI_DT_NULL_PACKET, nullpkg, 4, 0); 3092
3093 return dsi_vc_send_long(dsidev, channel, DSI_DT_NULL_PACKET, nullpkg,
3094 4, 0);
2411} 3095}
2412EXPORT_SYMBOL(dsi_vc_send_null); 3096EXPORT_SYMBOL(dsi_vc_send_null);
2413 3097
2414int dsi_vc_dcs_write_nosync(int channel, u8 *data, int len) 3098int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
3099 u8 *data, int len)
2415{ 3100{
3101 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2416 int r; 3102 int r;
2417 3103
2418 BUG_ON(len == 0); 3104 BUG_ON(len == 0);
2419 3105
2420 if (len == 1) { 3106 if (len == 1) {
2421 r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_0, 3107 r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_SHORT_WRITE_0,
2422 data[0], 0); 3108 data[0], 0);
2423 } else if (len == 2) { 3109 } else if (len == 2) {
2424 r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_1, 3110 r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_SHORT_WRITE_1,
2425 data[0] | (data[1] << 8), 0); 3111 data[0] | (data[1] << 8), 0);
2426 } else { 3112 } else {
2427 /* 0x39 = DCS Long Write */ 3113 /* 0x39 = DCS Long Write */
2428 r = dsi_vc_send_long(channel, DSI_DT_DCS_LONG_WRITE, 3114 r = dsi_vc_send_long(dsidev, channel, DSI_DT_DCS_LONG_WRITE,
2429 data, len, 0); 3115 data, len, 0);
2430 } 3116 }
2431 3117
@@ -2433,21 +3119,24 @@ int dsi_vc_dcs_write_nosync(int channel, u8 *data, int len)
2433} 3119}
2434EXPORT_SYMBOL(dsi_vc_dcs_write_nosync); 3120EXPORT_SYMBOL(dsi_vc_dcs_write_nosync);
2435 3121
2436int dsi_vc_dcs_write(int channel, u8 *data, int len) 3122int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
3123 int len)
2437{ 3124{
3125 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2438 int r; 3126 int r;
2439 3127
2440 r = dsi_vc_dcs_write_nosync(channel, data, len); 3128 r = dsi_vc_dcs_write_nosync(dssdev, channel, data, len);
2441 if (r) 3129 if (r)
2442 goto err; 3130 goto err;
2443 3131
2444 r = dsi_vc_send_bta_sync(channel); 3132 r = dsi_vc_send_bta_sync(dssdev, channel);
2445 if (r) 3133 if (r)
2446 goto err; 3134 goto err;
2447 3135
2448 if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { /* RX_FIFO_NOT_EMPTY */ 3136 /* RX_FIFO_NOT_EMPTY */
3137 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2449 DSSERR("rx fifo not empty after write, dumping data:\n"); 3138 DSSERR("rx fifo not empty after write, dumping data:\n");
2450 dsi_vc_flush_receive_data(channel); 3139 dsi_vc_flush_receive_data(dsidev, channel);
2451 r = -EIO; 3140 r = -EIO;
2452 goto err; 3141 goto err;
2453 } 3142 }
@@ -2460,47 +3149,51 @@ err:
2460} 3149}
2461EXPORT_SYMBOL(dsi_vc_dcs_write); 3150EXPORT_SYMBOL(dsi_vc_dcs_write);
2462 3151
2463int dsi_vc_dcs_write_0(int channel, u8 dcs_cmd) 3152int dsi_vc_dcs_write_0(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd)
2464{ 3153{
2465 return dsi_vc_dcs_write(channel, &dcs_cmd, 1); 3154 return dsi_vc_dcs_write(dssdev, channel, &dcs_cmd, 1);
2466} 3155}
2467EXPORT_SYMBOL(dsi_vc_dcs_write_0); 3156EXPORT_SYMBOL(dsi_vc_dcs_write_0);
2468 3157
2469int dsi_vc_dcs_write_1(int channel, u8 dcs_cmd, u8 param) 3158int dsi_vc_dcs_write_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3159 u8 param)
2470{ 3160{
2471 u8 buf[2]; 3161 u8 buf[2];
2472 buf[0] = dcs_cmd; 3162 buf[0] = dcs_cmd;
2473 buf[1] = param; 3163 buf[1] = param;
2474 return dsi_vc_dcs_write(channel, buf, 2); 3164 return dsi_vc_dcs_write(dssdev, channel, buf, 2);
2475} 3165}
2476EXPORT_SYMBOL(dsi_vc_dcs_write_1); 3166EXPORT_SYMBOL(dsi_vc_dcs_write_1);
2477 3167
2478int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen) 3168int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3169 u8 *buf, int buflen)
2479{ 3170{
3171 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3172 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2480 u32 val; 3173 u32 val;
2481 u8 dt; 3174 u8 dt;
2482 int r; 3175 int r;
2483 3176
2484 if (dsi.debug_read) 3177 if (dsi->debug_read)
2485 DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %x)\n", channel, dcs_cmd); 3178 DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %x)\n", channel, dcs_cmd);
2486 3179
2487 r = dsi_vc_send_short(channel, DSI_DT_DCS_READ, dcs_cmd, 0); 3180 r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_READ, dcs_cmd, 0);
2488 if (r) 3181 if (r)
2489 goto err; 3182 goto err;
2490 3183
2491 r = dsi_vc_send_bta_sync(channel); 3184 r = dsi_vc_send_bta_sync(dssdev, channel);
2492 if (r) 3185 if (r)
2493 goto err; 3186 goto err;
2494 3187
2495 /* RX_FIFO_NOT_EMPTY */ 3188 /* RX_FIFO_NOT_EMPTY */
2496 if (REG_GET(DSI_VC_CTRL(channel), 20, 20) == 0) { 3189 if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20) == 0) {
2497 DSSERR("RX fifo empty when trying to read.\n"); 3190 DSSERR("RX fifo empty when trying to read.\n");
2498 r = -EIO; 3191 r = -EIO;
2499 goto err; 3192 goto err;
2500 } 3193 }
2501 3194
2502 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel)); 3195 val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
2503 if (dsi.debug_read) 3196 if (dsi->debug_read)
2504 DSSDBG("\theader: %08x\n", val); 3197 DSSDBG("\theader: %08x\n", val);
2505 dt = FLD_GET(val, 5, 0); 3198 dt = FLD_GET(val, 5, 0);
2506 if (dt == DSI_DT_RX_ACK_WITH_ERR) { 3199 if (dt == DSI_DT_RX_ACK_WITH_ERR) {
@@ -2511,7 +3204,7 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
2511 3204
2512 } else if (dt == DSI_DT_RX_SHORT_READ_1) { 3205 } else if (dt == DSI_DT_RX_SHORT_READ_1) {
2513 u8 data = FLD_GET(val, 15, 8); 3206 u8 data = FLD_GET(val, 15, 8);
2514 if (dsi.debug_read) 3207 if (dsi->debug_read)
2515 DSSDBG("\tDCS short response, 1 byte: %02x\n", data); 3208 DSSDBG("\tDCS short response, 1 byte: %02x\n", data);
2516 3209
2517 if (buflen < 1) { 3210 if (buflen < 1) {
@@ -2524,7 +3217,7 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
2524 return 1; 3217 return 1;
2525 } else if (dt == DSI_DT_RX_SHORT_READ_2) { 3218 } else if (dt == DSI_DT_RX_SHORT_READ_2) {
2526 u16 data = FLD_GET(val, 23, 8); 3219 u16 data = FLD_GET(val, 23, 8);
2527 if (dsi.debug_read) 3220 if (dsi->debug_read)
2528 DSSDBG("\tDCS short response, 2 byte: %04x\n", data); 3221 DSSDBG("\tDCS short response, 2 byte: %04x\n", data);
2529 3222
2530 if (buflen < 2) { 3223 if (buflen < 2) {
@@ -2539,7 +3232,7 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
2539 } else if (dt == DSI_DT_RX_DCS_LONG_READ) { 3232 } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
2540 int w; 3233 int w;
2541 int len = FLD_GET(val, 23, 8); 3234 int len = FLD_GET(val, 23, 8);
2542 if (dsi.debug_read) 3235 if (dsi->debug_read)
2543 DSSDBG("\tDCS long response, len %d\n", len); 3236 DSSDBG("\tDCS long response, len %d\n", len);
2544 3237
2545 if (len > buflen) { 3238 if (len > buflen) {
@@ -2550,8 +3243,9 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
2550 /* two byte checksum ends the packet, not included in len */ 3243 /* two byte checksum ends the packet, not included in len */
2551 for (w = 0; w < len + 2;) { 3244 for (w = 0; w < len + 2;) {
2552 int b; 3245 int b;
2553 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel)); 3246 val = dsi_read_reg(dsidev,
2554 if (dsi.debug_read) 3247 DSI_VC_SHORT_PACKET_HEADER(channel));
3248 if (dsi->debug_read)
2555 DSSDBG("\t\t%02x %02x %02x %02x\n", 3249 DSSDBG("\t\t%02x %02x %02x %02x\n",
2556 (val >> 0) & 0xff, 3250 (val >> 0) & 0xff,
2557 (val >> 8) & 0xff, 3251 (val >> 8) & 0xff,
@@ -2582,11 +3276,12 @@ err:
2582} 3276}
2583EXPORT_SYMBOL(dsi_vc_dcs_read); 3277EXPORT_SYMBOL(dsi_vc_dcs_read);
2584 3278
2585int dsi_vc_dcs_read_1(int channel, u8 dcs_cmd, u8 *data) 3279int dsi_vc_dcs_read_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3280 u8 *data)
2586{ 3281{
2587 int r; 3282 int r;
2588 3283
2589 r = dsi_vc_dcs_read(channel, dcs_cmd, data, 1); 3284 r = dsi_vc_dcs_read(dssdev, channel, dcs_cmd, data, 1);
2590 3285
2591 if (r < 0) 3286 if (r < 0)
2592 return r; 3287 return r;
@@ -2598,12 +3293,13 @@ int dsi_vc_dcs_read_1(int channel, u8 dcs_cmd, u8 *data)
2598} 3293}
2599EXPORT_SYMBOL(dsi_vc_dcs_read_1); 3294EXPORT_SYMBOL(dsi_vc_dcs_read_1);
2600 3295
2601int dsi_vc_dcs_read_2(int channel, u8 dcs_cmd, u8 *data1, u8 *data2) 3296int dsi_vc_dcs_read_2(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3297 u8 *data1, u8 *data2)
2602{ 3298{
2603 u8 buf[2]; 3299 u8 buf[2];
2604 int r; 3300 int r;
2605 3301
2606 r = dsi_vc_dcs_read(channel, dcs_cmd, buf, 2); 3302 r = dsi_vc_dcs_read(dssdev, channel, dcs_cmd, buf, 2);
2607 3303
2608 if (r < 0) 3304 if (r < 0)
2609 return r; 3305 return r;
@@ -2618,14 +3314,94 @@ int dsi_vc_dcs_read_2(int channel, u8 dcs_cmd, u8 *data1, u8 *data2)
2618} 3314}
2619EXPORT_SYMBOL(dsi_vc_dcs_read_2); 3315EXPORT_SYMBOL(dsi_vc_dcs_read_2);
2620 3316
2621int dsi_vc_set_max_rx_packet_size(int channel, u16 len) 3317int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
3318 u16 len)
2622{ 3319{
2623 return dsi_vc_send_short(channel, DSI_DT_SET_MAX_RET_PKG_SIZE, 3320 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3321
3322 return dsi_vc_send_short(dsidev, channel, DSI_DT_SET_MAX_RET_PKG_SIZE,
2624 len, 0); 3323 len, 0);
2625} 3324}
2626EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size); 3325EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size);
2627 3326
2628static void dsi_set_lp_rx_timeout(unsigned ticks, bool x4, bool x16) 3327static int dsi_enter_ulps(struct platform_device *dsidev)
3328{
3329 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3330 DECLARE_COMPLETION_ONSTACK(completion);
3331 int r;
3332
3333 DSSDBGF();
3334
3335 WARN_ON(!dsi_bus_is_locked(dsidev));
3336
3337 WARN_ON(dsi->ulps_enabled);
3338
3339 if (dsi->ulps_enabled)
3340 return 0;
3341
3342 if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) {
3343 DSSERR("DDR_CLK_ALWAYS_ON enabled when entering ULPS\n");
3344 return -EIO;
3345 }
3346
3347 dsi_sync_vc(dsidev, 0);
3348 dsi_sync_vc(dsidev, 1);
3349 dsi_sync_vc(dsidev, 2);
3350 dsi_sync_vc(dsidev, 3);
3351
3352 dsi_force_tx_stop_mode_io(dsidev);
3353
3354 dsi_vc_enable(dsidev, 0, false);
3355 dsi_vc_enable(dsidev, 1, false);
3356 dsi_vc_enable(dsidev, 2, false);
3357 dsi_vc_enable(dsidev, 3, false);
3358
3359 if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */
3360 DSSERR("HS busy when enabling ULPS\n");
3361 return -EIO;
3362 }
3363
3364 if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */
3365 DSSERR("LP busy when enabling ULPS\n");
3366 return -EIO;
3367 }
3368
3369 r = dsi_register_isr_cio(dsidev, dsi_completion_handler, &completion,
3370 DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
3371 if (r)
3372 return r;
3373
3374 /* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
3375 /* LANEx_ULPS_SIG2 */
3376 REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, (1 << 0) | (1 << 1) | (1 << 2),
3377 7, 5);
3378
3379 if (wait_for_completion_timeout(&completion,
3380 msecs_to_jiffies(1000)) == 0) {
3381 DSSERR("ULPS enable timeout\n");
3382 r = -EIO;
3383 goto err;
3384 }
3385
3386 dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
3387 DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
3388
3389 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);
3390
3391 dsi_if_enable(dsidev, false);
3392
3393 dsi->ulps_enabled = true;
3394
3395 return 0;
3396
3397err:
3398 dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
3399 DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
3400 return r;
3401}
3402
3403static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
3404 unsigned ticks, bool x4, bool x16)
2629{ 3405{
2630 unsigned long fck; 3406 unsigned long fck;
2631 unsigned long total_ticks; 3407 unsigned long total_ticks;
@@ -2634,14 +3410,14 @@ static void dsi_set_lp_rx_timeout(unsigned ticks, bool x4, bool x16)
2634 BUG_ON(ticks > 0x1fff); 3410 BUG_ON(ticks > 0x1fff);
2635 3411
2636 /* ticks in DSI_FCK */ 3412 /* ticks in DSI_FCK */
2637 fck = dsi_fclk_rate(); 3413 fck = dsi_fclk_rate(dsidev);
2638 3414
2639 r = dsi_read_reg(DSI_TIMING2); 3415 r = dsi_read_reg(dsidev, DSI_TIMING2);
2640 r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */ 3416 r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */
2641 r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */ 3417 r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */
2642 r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */ 3418 r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */
2643 r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */ 3419 r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */
2644 dsi_write_reg(DSI_TIMING2, r); 3420 dsi_write_reg(dsidev, DSI_TIMING2, r);
2645 3421
2646 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); 3422 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
2647 3423
@@ -2651,7 +3427,8 @@ static void dsi_set_lp_rx_timeout(unsigned ticks, bool x4, bool x16)
2651 (total_ticks * 1000) / (fck / 1000 / 1000)); 3427 (total_ticks * 1000) / (fck / 1000 / 1000));
2652} 3428}
2653 3429
2654static void dsi_set_ta_timeout(unsigned ticks, bool x8, bool x16) 3430static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks,
3431 bool x8, bool x16)
2655{ 3432{
2656 unsigned long fck; 3433 unsigned long fck;
2657 unsigned long total_ticks; 3434 unsigned long total_ticks;
@@ -2660,14 +3437,14 @@ static void dsi_set_ta_timeout(unsigned ticks, bool x8, bool x16)
2660 BUG_ON(ticks > 0x1fff); 3437 BUG_ON(ticks > 0x1fff);
2661 3438
2662 /* ticks in DSI_FCK */ 3439 /* ticks in DSI_FCK */
2663 fck = dsi_fclk_rate(); 3440 fck = dsi_fclk_rate(dsidev);
2664 3441
2665 r = dsi_read_reg(DSI_TIMING1); 3442 r = dsi_read_reg(dsidev, DSI_TIMING1);
2666 r = FLD_MOD(r, 1, 31, 31); /* TA_TO */ 3443 r = FLD_MOD(r, 1, 31, 31); /* TA_TO */
2667 r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* TA_TO_X16 */ 3444 r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* TA_TO_X16 */
2668 r = FLD_MOD(r, x8 ? 1 : 0, 29, 29); /* TA_TO_X8 */ 3445 r = FLD_MOD(r, x8 ? 1 : 0, 29, 29); /* TA_TO_X8 */
2669 r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */ 3446 r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */
2670 dsi_write_reg(DSI_TIMING1, r); 3447 dsi_write_reg(dsidev, DSI_TIMING1, r);
2671 3448
2672 total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1); 3449 total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1);
2673 3450
@@ -2677,7 +3454,8 @@ static void dsi_set_ta_timeout(unsigned ticks, bool x8, bool x16)
2677 (total_ticks * 1000) / (fck / 1000 / 1000)); 3454 (total_ticks * 1000) / (fck / 1000 / 1000));
2678} 3455}
2679 3456
2680static void dsi_set_stop_state_counter(unsigned ticks, bool x4, bool x16) 3457static void dsi_set_stop_state_counter(struct platform_device *dsidev,
3458 unsigned ticks, bool x4, bool x16)
2681{ 3459{
2682 unsigned long fck; 3460 unsigned long fck;
2683 unsigned long total_ticks; 3461 unsigned long total_ticks;
@@ -2686,14 +3464,14 @@ static void dsi_set_stop_state_counter(unsigned ticks, bool x4, bool x16)
2686 BUG_ON(ticks > 0x1fff); 3464 BUG_ON(ticks > 0x1fff);
2687 3465
2688 /* ticks in DSI_FCK */ 3466 /* ticks in DSI_FCK */
2689 fck = dsi_fclk_rate(); 3467 fck = dsi_fclk_rate(dsidev);
2690 3468
2691 r = dsi_read_reg(DSI_TIMING1); 3469 r = dsi_read_reg(dsidev, DSI_TIMING1);
2692 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ 3470 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
2693 r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* STOP_STATE_X16_IO */ 3471 r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* STOP_STATE_X16_IO */
2694 r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* STOP_STATE_X4_IO */ 3472 r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* STOP_STATE_X4_IO */
2695 r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */ 3473 r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */
2696 dsi_write_reg(DSI_TIMING1, r); 3474 dsi_write_reg(dsidev, DSI_TIMING1, r);
2697 3475
2698 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); 3476 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
2699 3477
@@ -2703,7 +3481,8 @@ static void dsi_set_stop_state_counter(unsigned ticks, bool x4, bool x16)
2703 (total_ticks * 1000) / (fck / 1000 / 1000)); 3481 (total_ticks * 1000) / (fck / 1000 / 1000));
2704} 3482}
2705 3483
2706static void dsi_set_hs_tx_timeout(unsigned ticks, bool x4, bool x16) 3484static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
3485 unsigned ticks, bool x4, bool x16)
2707{ 3486{
2708 unsigned long fck; 3487 unsigned long fck;
2709 unsigned long total_ticks; 3488 unsigned long total_ticks;
@@ -2712,14 +3491,14 @@ static void dsi_set_hs_tx_timeout(unsigned ticks, bool x4, bool x16)
2712 BUG_ON(ticks > 0x1fff); 3491 BUG_ON(ticks > 0x1fff);
2713 3492
2714 /* ticks in TxByteClkHS */ 3493 /* ticks in TxByteClkHS */
2715 fck = dsi_get_txbyteclkhs(); 3494 fck = dsi_get_txbyteclkhs(dsidev);
2716 3495
2717 r = dsi_read_reg(DSI_TIMING2); 3496 r = dsi_read_reg(dsidev, DSI_TIMING2);
2718 r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */ 3497 r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */
2719 r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* HS_TX_TO_X16 */ 3498 r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* HS_TX_TO_X16 */
2720 r = FLD_MOD(r, x4 ? 1 : 0, 29, 29); /* HS_TX_TO_X8 (4 really) */ 3499 r = FLD_MOD(r, x4 ? 1 : 0, 29, 29); /* HS_TX_TO_X8 (4 really) */
2721 r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */ 3500 r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */
2722 dsi_write_reg(DSI_TIMING2, r); 3501 dsi_write_reg(dsidev, DSI_TIMING2, r);
2723 3502
2724 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); 3503 total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
2725 3504
@@ -2730,24 +3509,25 @@ static void dsi_set_hs_tx_timeout(unsigned ticks, bool x4, bool x16)
2730} 3509}
2731static int dsi_proto_config(struct omap_dss_device *dssdev) 3510static int dsi_proto_config(struct omap_dss_device *dssdev)
2732{ 3511{
3512 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2733 u32 r; 3513 u32 r;
2734 int buswidth = 0; 3514 int buswidth = 0;
2735 3515
2736 dsi_config_tx_fifo(DSI_FIFO_SIZE_32, 3516 dsi_config_tx_fifo(dsidev, DSI_FIFO_SIZE_32,
2737 DSI_FIFO_SIZE_32, 3517 DSI_FIFO_SIZE_32,
2738 DSI_FIFO_SIZE_32, 3518 DSI_FIFO_SIZE_32,
2739 DSI_FIFO_SIZE_32); 3519 DSI_FIFO_SIZE_32);
2740 3520
2741 dsi_config_rx_fifo(DSI_FIFO_SIZE_32, 3521 dsi_config_rx_fifo(dsidev, DSI_FIFO_SIZE_32,
2742 DSI_FIFO_SIZE_32, 3522 DSI_FIFO_SIZE_32,
2743 DSI_FIFO_SIZE_32, 3523 DSI_FIFO_SIZE_32,
2744 DSI_FIFO_SIZE_32); 3524 DSI_FIFO_SIZE_32);
2745 3525
2746 /* XXX what values for the timeouts? */ 3526 /* XXX what values for the timeouts? */
2747 dsi_set_stop_state_counter(0x1000, false, false); 3527 dsi_set_stop_state_counter(dsidev, 0x1000, false, false);
2748 dsi_set_ta_timeout(0x1fff, true, true); 3528 dsi_set_ta_timeout(dsidev, 0x1fff, true, true);
2749 dsi_set_lp_rx_timeout(0x1fff, true, true); 3529 dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true);
2750 dsi_set_hs_tx_timeout(0x1fff, true, true); 3530 dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true);
2751 3531
2752 switch (dssdev->ctrl.pixel_size) { 3532 switch (dssdev->ctrl.pixel_size) {
2753 case 16: 3533 case 16:
@@ -2763,7 +3543,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
2763 BUG(); 3543 BUG();
2764 } 3544 }
2765 3545
2766 r = dsi_read_reg(DSI_CTRL); 3546 r = dsi_read_reg(dsidev, DSI_CTRL);
2767 r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */ 3547 r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */
2768 r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */ 3548 r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */
2769 r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */ 3549 r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */
@@ -2773,21 +3553,25 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
2773 r = FLD_MOD(r, 2, 13, 12); /* LINE_BUFFER, 2 lines */ 3553 r = FLD_MOD(r, 2, 13, 12); /* LINE_BUFFER, 2 lines */
2774 r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */ 3554 r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */
2775 r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */ 3555 r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */
2776 r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */ 3556 if (!dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
2777 r = FLD_MOD(r, 0, 25, 25); /* DCS_CMD_CODE, 1=start, 0=continue */ 3557 r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */
3558 /* DCS_CMD_CODE, 1=start, 0=continue */
3559 r = FLD_MOD(r, 0, 25, 25);
3560 }
2778 3561
2779 dsi_write_reg(DSI_CTRL, r); 3562 dsi_write_reg(dsidev, DSI_CTRL, r);
2780 3563
2781 dsi_vc_initial_config(0); 3564 dsi_vc_initial_config(dsidev, 0);
2782 dsi_vc_initial_config(1); 3565 dsi_vc_initial_config(dsidev, 1);
2783 dsi_vc_initial_config(2); 3566 dsi_vc_initial_config(dsidev, 2);
2784 dsi_vc_initial_config(3); 3567 dsi_vc_initial_config(dsidev, 3);
2785 3568
2786 return 0; 3569 return 0;
2787} 3570}
2788 3571
2789static void dsi_proto_timings(struct omap_dss_device *dssdev) 3572static void dsi_proto_timings(struct omap_dss_device *dssdev)
2790{ 3573{
3574 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2791 unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail; 3575 unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
2792 unsigned tclk_pre, tclk_post; 3576 unsigned tclk_pre, tclk_post;
2793 unsigned ths_prepare, ths_prepare_ths_zero, ths_zero; 3577 unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
@@ -2797,32 +3581,27 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev)
2797 unsigned ths_eot; 3581 unsigned ths_eot;
2798 u32 r; 3582 u32 r;
2799 3583
2800 r = dsi_read_reg(DSI_DSIPHY_CFG0); 3584 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
2801 ths_prepare = FLD_GET(r, 31, 24); 3585 ths_prepare = FLD_GET(r, 31, 24);
2802 ths_prepare_ths_zero = FLD_GET(r, 23, 16); 3586 ths_prepare_ths_zero = FLD_GET(r, 23, 16);
2803 ths_zero = ths_prepare_ths_zero - ths_prepare; 3587 ths_zero = ths_prepare_ths_zero - ths_prepare;
2804 ths_trail = FLD_GET(r, 15, 8); 3588 ths_trail = FLD_GET(r, 15, 8);
2805 ths_exit = FLD_GET(r, 7, 0); 3589 ths_exit = FLD_GET(r, 7, 0);
2806 3590
2807 r = dsi_read_reg(DSI_DSIPHY_CFG1); 3591 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
2808 tlpx = FLD_GET(r, 22, 16) * 2; 3592 tlpx = FLD_GET(r, 22, 16) * 2;
2809 tclk_trail = FLD_GET(r, 15, 8); 3593 tclk_trail = FLD_GET(r, 15, 8);
2810 tclk_zero = FLD_GET(r, 7, 0); 3594 tclk_zero = FLD_GET(r, 7, 0);
2811 3595
2812 r = dsi_read_reg(DSI_DSIPHY_CFG2); 3596 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
2813 tclk_prepare = FLD_GET(r, 7, 0); 3597 tclk_prepare = FLD_GET(r, 7, 0);
2814 3598
2815 /* min 8*UI */ 3599 /* min 8*UI */
2816 tclk_pre = 20; 3600 tclk_pre = 20;
2817 /* min 60ns + 52*UI */ 3601 /* min 60ns + 52*UI */
2818 tclk_post = ns2ddr(60) + 26; 3602 tclk_post = ns2ddr(dsidev, 60) + 26;
2819 3603
2820 /* ths_eot is 2 for 2 datalanes and 4 for 1 datalane */ 3604 ths_eot = DIV_ROUND_UP(4, dsi_get_num_data_lanes_dssdev(dssdev));
2821 if (dssdev->phy.dsi.data1_lane != 0 &&
2822 dssdev->phy.dsi.data2_lane != 0)
2823 ths_eot = 2;
2824 else
2825 ths_eot = 4;
2826 3605
2827 ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare, 3606 ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare,
2828 4); 3607 4);
@@ -2831,10 +3610,10 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev)
2831 BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255); 3610 BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
2832 BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255); 3611 BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);
2833 3612
2834 r = dsi_read_reg(DSI_CLK_TIMING); 3613 r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
2835 r = FLD_MOD(r, ddr_clk_pre, 15, 8); 3614 r = FLD_MOD(r, ddr_clk_pre, 15, 8);
2836 r = FLD_MOD(r, ddr_clk_post, 7, 0); 3615 r = FLD_MOD(r, ddr_clk_post, 7, 0);
2837 dsi_write_reg(DSI_CLK_TIMING, r); 3616 dsi_write_reg(dsidev, DSI_CLK_TIMING, r);
2838 3617
2839 DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n", 3618 DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
2840 ddr_clk_pre, 3619 ddr_clk_pre,
@@ -2848,7 +3627,7 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev)
2848 3627
2849 r = FLD_VAL(enter_hs_mode_lat, 31, 16) | 3628 r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
2850 FLD_VAL(exit_hs_mode_lat, 15, 0); 3629 FLD_VAL(exit_hs_mode_lat, 15, 0);
2851 dsi_write_reg(DSI_VM_TIMING7, r); 3630 dsi_write_reg(dsidev, DSI_VM_TIMING7, r);
2852 3631
2853 DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n", 3632 DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
2854 enter_hs_mode_lat, exit_hs_mode_lat); 3633 enter_hs_mode_lat, exit_hs_mode_lat);
@@ -2858,25 +3637,27 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev)
2858#define DSI_DECL_VARS \ 3637#define DSI_DECL_VARS \
2859 int __dsi_cb = 0; u32 __dsi_cv = 0; 3638 int __dsi_cb = 0; u32 __dsi_cv = 0;
2860 3639
2861#define DSI_FLUSH(ch) \ 3640#define DSI_FLUSH(dsidev, ch) \
2862 if (__dsi_cb > 0) { \ 3641 if (__dsi_cb > 0) { \
2863 /*DSSDBG("sending long packet %#010x\n", __dsi_cv);*/ \ 3642 /*DSSDBG("sending long packet %#010x\n", __dsi_cv);*/ \
2864 dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(ch), __dsi_cv); \ 3643 dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(ch), __dsi_cv); \
2865 __dsi_cb = __dsi_cv = 0; \ 3644 __dsi_cb = __dsi_cv = 0; \
2866 } 3645 }
2867 3646
2868#define DSI_PUSH(ch, data) \ 3647#define DSI_PUSH(dsidev, ch, data) \
2869 do { \ 3648 do { \
2870 __dsi_cv |= (data) << (__dsi_cb * 8); \ 3649 __dsi_cv |= (data) << (__dsi_cb * 8); \
2871 /*DSSDBG("cv = %#010x, cb = %d\n", __dsi_cv, __dsi_cb);*/ \ 3650 /*DSSDBG("cv = %#010x, cb = %d\n", __dsi_cv, __dsi_cb);*/ \
2872 if (++__dsi_cb > 3) \ 3651 if (++__dsi_cb > 3) \
2873 DSI_FLUSH(ch); \ 3652 DSI_FLUSH(dsidev, ch); \
2874 } while (0) 3653 } while (0)
2875 3654
2876static int dsi_update_screen_l4(struct omap_dss_device *dssdev, 3655static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
2877 int x, int y, int w, int h) 3656 int x, int y, int w, int h)
2878{ 3657{
2879 /* Note: supports only 24bit colors in 32bit container */ 3658 /* Note: supports only 24bit colors in 32bit container */
3659 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3660 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2880 int first = 1; 3661 int first = 1;
2881 int fifo_stalls = 0; 3662 int fifo_stalls = 0;
2882 int max_dsi_packet_size; 3663 int max_dsi_packet_size;
@@ -2915,7 +3696,7 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
2915 * in fifo */ 3696 * in fifo */
2916 3697
2917 /* When using CPU, max long packet size is TX buffer size */ 3698 /* When using CPU, max long packet size is TX buffer size */
2918 max_dsi_packet_size = dsi.vc[0].fifo_size * 32 * 4; 3699 max_dsi_packet_size = dsi->vc[0].fifo_size * 32 * 4;
2919 3700
2920 /* we seem to get better perf if we divide the tx fifo to half, 3701 /* we seem to get better perf if we divide the tx fifo to half,
2921 and while the other half is being sent, we fill the other half 3702 and while the other half is being sent, we fill the other half
@@ -2944,35 +3725,36 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
2944#if 1 3725#if 1
2945 /* using fifo not empty */ 3726 /* using fifo not empty */
2946 /* TX_FIFO_NOT_EMPTY */ 3727 /* TX_FIFO_NOT_EMPTY */
2947 while (FLD_GET(dsi_read_reg(DSI_VC_CTRL(0)), 5, 5)) { 3728 while (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(0)), 5, 5)) {
2948 fifo_stalls++; 3729 fifo_stalls++;
2949 if (fifo_stalls > 0xfffff) { 3730 if (fifo_stalls > 0xfffff) {
2950 DSSERR("fifo stalls overflow, pixels left %d\n", 3731 DSSERR("fifo stalls overflow, pixels left %d\n",
2951 pixels_left); 3732 pixels_left);
2952 dsi_if_enable(0); 3733 dsi_if_enable(dsidev, 0);
2953 return -EIO; 3734 return -EIO;
2954 } 3735 }
2955 udelay(1); 3736 udelay(1);
2956 } 3737 }
2957#elif 1 3738#elif 1
2958 /* using fifo emptiness */ 3739 /* using fifo emptiness */
2959 while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 < 3740 while ((REG_GET(dsidev, DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 <
2960 max_dsi_packet_size) { 3741 max_dsi_packet_size) {
2961 fifo_stalls++; 3742 fifo_stalls++;
2962 if (fifo_stalls > 0xfffff) { 3743 if (fifo_stalls > 0xfffff) {
2963 DSSERR("fifo stalls overflow, pixels left %d\n", 3744 DSSERR("fifo stalls overflow, pixels left %d\n",
2964 pixels_left); 3745 pixels_left);
2965 dsi_if_enable(0); 3746 dsi_if_enable(dsidev, 0);
2966 return -EIO; 3747 return -EIO;
2967 } 3748 }
2968 } 3749 }
2969#else 3750#else
2970 while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 == 0) { 3751 while ((REG_GET(dsidev, DSI_TX_FIFO_VC_EMPTINESS,
3752 7, 0) + 1) * 4 == 0) {
2971 fifo_stalls++; 3753 fifo_stalls++;
2972 if (fifo_stalls > 0xfffff) { 3754 if (fifo_stalls > 0xfffff) {
2973 DSSERR("fifo stalls overflow, pixels left %d\n", 3755 DSSERR("fifo stalls overflow, pixels left %d\n",
2974 pixels_left); 3756 pixels_left);
2975 dsi_if_enable(0); 3757 dsi_if_enable(dsidev, 0);
2976 return -EIO; 3758 return -EIO;
2977 } 3759 }
2978 } 3760 }
@@ -2981,17 +3763,17 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
2981 3763
2982 pixels_left -= pixels; 3764 pixels_left -= pixels;
2983 3765
2984 dsi_vc_write_long_header(0, DSI_DT_DCS_LONG_WRITE, 3766 dsi_vc_write_long_header(dsidev, 0, DSI_DT_DCS_LONG_WRITE,
2985 1 + pixels * bytespp, 0); 3767 1 + pixels * bytespp, 0);
2986 3768
2987 DSI_PUSH(0, dcs_cmd); 3769 DSI_PUSH(dsidev, 0, dcs_cmd);
2988 3770
2989 while (pixels-- > 0) { 3771 while (pixels-- > 0) {
2990 u32 pix = __raw_readl(data++); 3772 u32 pix = __raw_readl(data++);
2991 3773
2992 DSI_PUSH(0, (pix >> 16) & 0xff); 3774 DSI_PUSH(dsidev, 0, (pix >> 16) & 0xff);
2993 DSI_PUSH(0, (pix >> 8) & 0xff); 3775 DSI_PUSH(dsidev, 0, (pix >> 8) & 0xff);
2994 DSI_PUSH(0, (pix >> 0) & 0xff); 3776 DSI_PUSH(dsidev, 0, (pix >> 0) & 0xff);
2995 3777
2996 current_x++; 3778 current_x++;
2997 if (current_x == x+w) { 3779 if (current_x == x+w) {
@@ -3000,7 +3782,7 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
3000 } 3782 }
3001 } 3783 }
3002 3784
3003 DSI_FLUSH(0); 3785 DSI_FLUSH(dsidev, 0);
3004 } 3786 }
3005 3787
3006 return 0; 3788 return 0;
@@ -3009,6 +3791,8 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
3009static void dsi_update_screen_dispc(struct omap_dss_device *dssdev, 3791static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
3010 u16 x, u16 y, u16 w, u16 h) 3792 u16 x, u16 y, u16 w, u16 h)
3011{ 3793{
3794 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3795 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3012 unsigned bytespp; 3796 unsigned bytespp;
3013 unsigned bytespl; 3797 unsigned bytespl;
3014 unsigned bytespf; 3798 unsigned bytespf;
@@ -3017,16 +3801,13 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
3017 unsigned packet_len; 3801 unsigned packet_len;
3018 u32 l; 3802 u32 l;
3019 int r; 3803 int r;
3020 const unsigned channel = dsi.update_channel; 3804 const unsigned channel = dsi->update_channel;
3021 /* line buffer is 1024 x 24bits */ 3805 const unsigned line_buf_size = dsi_get_line_buf_size(dsidev);
3022 /* XXX: for some reason using full buffer size causes considerable TX
3023 * slowdown with update sizes that fill the whole buffer */
3024 const unsigned line_buf_size = 1023 * 3;
3025 3806
3026 DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n", 3807 DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n",
3027 x, y, w, h); 3808 x, y, w, h);
3028 3809
3029 dsi_vc_config_vp(channel); 3810 dsi_vc_config_vp(dsidev, channel);
3030 3811
3031 bytespp = dssdev->ctrl.pixel_size / 8; 3812 bytespp = dssdev->ctrl.pixel_size / 8;
3032 bytespl = w * bytespp; 3813 bytespl = w * bytespp;
@@ -3047,15 +3828,16 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
3047 total_len += (bytespf % packet_payload) + 1; 3828 total_len += (bytespf % packet_payload) + 1;
3048 3829
3049 l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */ 3830 l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
3050 dsi_write_reg(DSI_VC_TE(channel), l); 3831 dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
3051 3832
3052 dsi_vc_write_long_header(channel, DSI_DT_DCS_LONG_WRITE, packet_len, 0); 3833 dsi_vc_write_long_header(dsidev, channel, DSI_DT_DCS_LONG_WRITE,
3834 packet_len, 0);
3053 3835
3054 if (dsi.te_enabled) 3836 if (dsi->te_enabled)
3055 l = FLD_MOD(l, 1, 30, 30); /* TE_EN */ 3837 l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
3056 else 3838 else
3057 l = FLD_MOD(l, 1, 31, 31); /* TE_START */ 3839 l = FLD_MOD(l, 1, 31, 31); /* TE_START */
3058 dsi_write_reg(DSI_VC_TE(channel), l); 3840 dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
3059 3841
3060 /* We put SIDLEMODE to no-idle for the duration of the transfer, 3842 /* We put SIDLEMODE to no-idle for the duration of the transfer,
3061 * because DSS interrupts are not capable of waking up the CPU and the 3843 * because DSS interrupts are not capable of waking up the CPU and the
@@ -3065,23 +3847,23 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
3065 */ 3847 */
3066 dispc_disable_sidle(); 3848 dispc_disable_sidle();
3067 3849
3068 dsi_perf_mark_start(); 3850 dsi_perf_mark_start(dsidev);
3069 3851
3070 r = queue_delayed_work(dsi.workqueue, &dsi.framedone_timeout_work, 3852 r = schedule_delayed_work(&dsi->framedone_timeout_work,
3071 msecs_to_jiffies(250)); 3853 msecs_to_jiffies(250));
3072 BUG_ON(r == 0); 3854 BUG_ON(r == 0);
3073 3855
3074 dss_start_update(dssdev); 3856 dss_start_update(dssdev);
3075 3857
3076 if (dsi.te_enabled) { 3858 if (dsi->te_enabled) {
3077 /* disable LP_RX_TO, so that we can receive TE. Time to wait 3859 /* disable LP_RX_TO, so that we can receive TE. Time to wait
3078 * for TE is longer than the timer allows */ 3860 * for TE is longer than the timer allows */
3079 REG_FLD_MOD(DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */ 3861 REG_FLD_MOD(dsidev, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
3080 3862
3081 dsi_vc_send_bta(channel); 3863 dsi_vc_send_bta(dsidev, channel);
3082 3864
3083#ifdef DSI_CATCH_MISSING_TE 3865#ifdef DSI_CATCH_MISSING_TE
3084 mod_timer(&dsi.te_timer, jiffies + msecs_to_jiffies(250)); 3866 mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
3085#endif 3867#endif
3086 } 3868 }
3087} 3869}
@@ -3093,41 +3875,28 @@ static void dsi_te_timeout(unsigned long arg)
3093} 3875}
3094#endif 3876#endif
3095 3877
3096static void dsi_framedone_bta_callback(void *data, u32 mask); 3878static void dsi_handle_framedone(struct platform_device *dsidev, int error)
3097
3098static void dsi_handle_framedone(int error)
3099{ 3879{
3100 const int channel = dsi.update_channel; 3880 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3101
3102 dsi_unregister_isr_vc(channel, dsi_framedone_bta_callback,
3103 NULL, DSI_VC_IRQ_BTA);
3104
3105 cancel_delayed_work(&dsi.framedone_timeout_work);
3106 3881
3107 /* SIDLEMODE back to smart-idle */ 3882 /* SIDLEMODE back to smart-idle */
3108 dispc_enable_sidle(); 3883 dispc_enable_sidle();
3109 3884
3110 if (dsi.te_enabled) { 3885 if (dsi->te_enabled) {
3111 /* enable LP_RX_TO again after the TE */ 3886 /* enable LP_RX_TO again after the TE */
3112 REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */ 3887 REG_FLD_MOD(dsidev, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
3113 } 3888 }
3114 3889
3115 /* RX_FIFO_NOT_EMPTY */ 3890 dsi->framedone_callback(error, dsi->framedone_data);
3116 if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
3117 DSSERR("Received error during frame transfer:\n");
3118 dsi_vc_flush_receive_data(channel);
3119 if (!error)
3120 error = -EIO;
3121 }
3122
3123 dsi.framedone_callback(error, dsi.framedone_data);
3124 3891
3125 if (!error) 3892 if (!error)
3126 dsi_perf_show("DISPC"); 3893 dsi_perf_show(dsidev, "DISPC");
3127} 3894}
3128 3895
3129static void dsi_framedone_timeout_work_callback(struct work_struct *work) 3896static void dsi_framedone_timeout_work_callback(struct work_struct *work)
3130{ 3897{
3898 struct dsi_data *dsi = container_of(work, struct dsi_data,
3899 framedone_timeout_work.work);
3131 /* XXX While extremely unlikely, we could get FRAMEDONE interrupt after 3900 /* XXX While extremely unlikely, we could get FRAMEDONE interrupt after
3132 * 250ms which would conflict with this timeout work. What should be 3901 * 250ms which would conflict with this timeout work. What should be
3133 * done is first cancel the transfer on the HW, and then cancel the 3902 * done is first cancel the transfer on the HW, and then cancel the
@@ -3137,70 +3906,34 @@ static void dsi_framedone_timeout_work_callback(struct work_struct *work)
3137 3906
3138 DSSERR("Framedone not received for 250ms!\n"); 3907 DSSERR("Framedone not received for 250ms!\n");
3139 3908
3140 dsi_handle_framedone(-ETIMEDOUT); 3909 dsi_handle_framedone(dsi->pdev, -ETIMEDOUT);
3141}
3142
3143static void dsi_framedone_bta_callback(void *data, u32 mask)
3144{
3145 dsi_handle_framedone(0);
3146
3147#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
3148 dispc_fake_vsync_irq();
3149#endif
3150} 3910}
3151 3911
3152static void dsi_framedone_irq_callback(void *data, u32 mask) 3912static void dsi_framedone_irq_callback(void *data, u32 mask)
3153{ 3913{
3154 const int channel = dsi.update_channel; 3914 struct omap_dss_device *dssdev = (struct omap_dss_device *) data;
3155 int r; 3915 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3916 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3156 3917
3157 /* Note: We get FRAMEDONE when DISPC has finished sending pixels and 3918 /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
3158 * turns itself off. However, DSI still has the pixels in its buffers, 3919 * turns itself off. However, DSI still has the pixels in its buffers,
3159 * and is sending the data. 3920 * and is sending the data.
3160 */ 3921 */
3161 3922
3162 if (dsi.te_enabled) { 3923 __cancel_delayed_work(&dsi->framedone_timeout_work);
3163 /* enable LP_RX_TO again after the TE */
3164 REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
3165 }
3166
3167 /* Send BTA after the frame. We need this for the TE to work, as TE
3168 * trigger is only sent for BTAs without preceding packet. Thus we need
3169 * to BTA after the pixel packets so that next BTA will cause TE
3170 * trigger.
3171 *
3172 * This is not needed when TE is not in use, but we do it anyway to
3173 * make sure that the transfer has been completed. It would be more
3174 * optimal, but more complex, to wait only just before starting next
3175 * transfer.
3176 *
3177 * Also, as there's no interrupt telling when the transfer has been
3178 * done and the channel could be reconfigured, the only way is to
3179 * busyloop until TE_SIZE is zero. With BTA we can do this
3180 * asynchronously.
3181 * */
3182
3183 r = dsi_register_isr_vc(channel, dsi_framedone_bta_callback,
3184 NULL, DSI_VC_IRQ_BTA);
3185 if (r) {
3186 DSSERR("Failed to register BTA ISR\n");
3187 dsi_handle_framedone(-EIO);
3188 return;
3189 }
3190 3924
3191 r = dsi_vc_send_bta(channel); 3925 dsi_handle_framedone(dsidev, 0);
3192 if (r) { 3926
3193 DSSERR("BTA after framedone failed\n"); 3927#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
3194 dsi_unregister_isr_vc(channel, dsi_framedone_bta_callback, 3928 dispc_fake_vsync_irq();
3195 NULL, DSI_VC_IRQ_BTA); 3929#endif
3196 dsi_handle_framedone(-EIO);
3197 }
3198} 3930}
3199 3931
3200int omap_dsi_prepare_update(struct omap_dss_device *dssdev, 3932int omap_dsi_prepare_update(struct omap_dss_device *dssdev,
3201 u16 *x, u16 *y, u16 *w, u16 *h, 3933 u16 *x, u16 *y, u16 *w, u16 *h,
3202 bool enlarge_update_area) 3934 bool enlarge_update_area)
3203{ 3935{
3936 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3204 u16 dw, dh; 3937 u16 dw, dh;
3205 3938
3206 dssdev->driver->get_resolution(dssdev, &dw, &dh); 3939 dssdev->driver->get_resolution(dssdev, &dw, &dh);
@@ -3220,7 +3953,7 @@ int omap_dsi_prepare_update(struct omap_dss_device *dssdev,
3220 if (*w == 0 || *h == 0) 3953 if (*w == 0 || *h == 0)
3221 return -EINVAL; 3954 return -EINVAL;
3222 3955
3223 dsi_perf_mark_setup(); 3956 dsi_perf_mark_setup(dsidev);
3224 3957
3225 if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) { 3958 if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
3226 dss_setup_partial_planes(dssdev, x, y, w, h, 3959 dss_setup_partial_planes(dssdev, x, y, w, h,
@@ -3237,7 +3970,10 @@ int omap_dsi_update(struct omap_dss_device *dssdev,
3237 u16 x, u16 y, u16 w, u16 h, 3970 u16 x, u16 y, u16 w, u16 h,
3238 void (*callback)(int, void *), void *data) 3971 void (*callback)(int, void *), void *data)
3239{ 3972{
3240 dsi.update_channel = channel; 3973 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3974 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3975
3976 dsi->update_channel = channel;
3241 3977
3242 /* OMAP DSS cannot send updates of odd widths. 3978 /* OMAP DSS cannot send updates of odd widths.
3243 * omap_dsi_prepare_update() makes the widths even, but add a BUG_ON 3979 * omap_dsi_prepare_update() makes the widths even, but add a BUG_ON
@@ -3246,14 +3982,14 @@ int omap_dsi_update(struct omap_dss_device *dssdev,
3246 BUG_ON(x % 2 == 1); 3982 BUG_ON(x % 2 == 1);
3247 3983
3248 if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) { 3984 if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
3249 dsi.framedone_callback = callback; 3985 dsi->framedone_callback = callback;
3250 dsi.framedone_data = data; 3986 dsi->framedone_data = data;
3251 3987
3252 dsi.update_region.x = x; 3988 dsi->update_region.x = x;
3253 dsi.update_region.y = y; 3989 dsi->update_region.y = y;
3254 dsi.update_region.w = w; 3990 dsi->update_region.w = w;
3255 dsi.update_region.h = h; 3991 dsi->update_region.h = h;
3256 dsi.update_region.device = dssdev; 3992 dsi->update_region.device = dssdev;
3257 3993
3258 dsi_update_screen_dispc(dssdev, x, y, w, h); 3994 dsi_update_screen_dispc(dssdev, x, y, w, h);
3259 } else { 3995 } else {
@@ -3263,7 +3999,7 @@ int omap_dsi_update(struct omap_dss_device *dssdev,
3263 if (r) 3999 if (r)
3264 return r; 4000 return r;
3265 4001
3266 dsi_perf_show("L4"); 4002 dsi_perf_show(dsidev, "L4");
3267 callback(0, data); 4003 callback(0, data);
3268 } 4004 }
3269 4005
@@ -3276,9 +4012,13 @@ EXPORT_SYMBOL(omap_dsi_update);
3276static int dsi_display_init_dispc(struct omap_dss_device *dssdev) 4012static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
3277{ 4013{
3278 int r; 4014 int r;
4015 u32 irq;
4016
4017 irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
4018 DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;
3279 4019
3280 r = omap_dispc_register_isr(dsi_framedone_irq_callback, NULL, 4020 r = omap_dispc_register_isr(dsi_framedone_irq_callback, (void *) dssdev,
3281 DISPC_IRQ_FRAMEDONE); 4021 irq);
3282 if (r) { 4022 if (r) {
3283 DSSERR("can't get FRAMEDONE irq\n"); 4023 DSSERR("can't get FRAMEDONE irq\n");
3284 return r; 4024 return r;
@@ -3311,28 +4051,34 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
3311 4051
3312static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev) 4052static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
3313{ 4053{
3314 omap_dispc_unregister_isr(dsi_framedone_irq_callback, NULL, 4054 u32 irq;
3315 DISPC_IRQ_FRAMEDONE); 4055
4056 irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
4057 DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;
4058
4059 omap_dispc_unregister_isr(dsi_framedone_irq_callback, (void *) dssdev,
4060 irq);
3316} 4061}
3317 4062
3318static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev) 4063static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
3319{ 4064{
4065 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3320 struct dsi_clock_info cinfo; 4066 struct dsi_clock_info cinfo;
3321 int r; 4067 int r;
3322 4068
3323 /* we always use DSS_CLK_SYSCK as input clock */ 4069 /* we always use DSS_CLK_SYSCK as input clock */
3324 cinfo.use_sys_clk = true; 4070 cinfo.use_sys_clk = true;
3325 cinfo.regn = dssdev->phy.dsi.div.regn; 4071 cinfo.regn = dssdev->clocks.dsi.regn;
3326 cinfo.regm = dssdev->phy.dsi.div.regm; 4072 cinfo.regm = dssdev->clocks.dsi.regm;
3327 cinfo.regm_dispc = dssdev->phy.dsi.div.regm_dispc; 4073 cinfo.regm_dispc = dssdev->clocks.dsi.regm_dispc;
3328 cinfo.regm_dsi = dssdev->phy.dsi.div.regm_dsi; 4074 cinfo.regm_dsi = dssdev->clocks.dsi.regm_dsi;
3329 r = dsi_calc_clock_rates(dssdev, &cinfo); 4075 r = dsi_calc_clock_rates(dssdev, &cinfo);
3330 if (r) { 4076 if (r) {
3331 DSSERR("Failed to calc dsi clocks\n"); 4077 DSSERR("Failed to calc dsi clocks\n");
3332 return r; 4078 return r;
3333 } 4079 }
3334 4080
3335 r = dsi_pll_set_clock_div(&cinfo); 4081 r = dsi_pll_set_clock_div(dsidev, &cinfo);
3336 if (r) { 4082 if (r) {
3337 DSSERR("Failed to set dsi clocks\n"); 4083 DSSERR("Failed to set dsi clocks\n");
3338 return r; 4084 return r;
@@ -3343,14 +4089,15 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
3343 4089
3344static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev) 4090static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
3345{ 4091{
4092 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3346 struct dispc_clock_info dispc_cinfo; 4093 struct dispc_clock_info dispc_cinfo;
3347 int r; 4094 int r;
3348 unsigned long long fck; 4095 unsigned long long fck;
3349 4096
3350 fck = dsi_get_pll_hsdiv_dispc_rate(); 4097 fck = dsi_get_pll_hsdiv_dispc_rate(dsidev);
3351 4098
3352 dispc_cinfo.lck_div = dssdev->phy.dsi.div.lck_div; 4099 dispc_cinfo.lck_div = dssdev->clocks.dispc.channel.lck_div;
3353 dispc_cinfo.pck_div = dssdev->phy.dsi.div.pck_div; 4100 dispc_cinfo.pck_div = dssdev->clocks.dispc.channel.pck_div;
3354 4101
3355 r = dispc_calc_clock_rates(fck, &dispc_cinfo); 4102 r = dispc_calc_clock_rates(fck, &dispc_cinfo);
3356 if (r) { 4103 if (r) {
@@ -3369,11 +4116,11 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
3369 4116
3370static int dsi_display_init_dsi(struct omap_dss_device *dssdev) 4117static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
3371{ 4118{
4119 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4120 int dsi_module = dsi_get_dsidev_id(dsidev);
3372 int r; 4121 int r;
3373 4122
3374 _dsi_print_reset_status(); 4123 r = dsi_pll_init(dsidev, true, true);
3375
3376 r = dsi_pll_init(dssdev, true, true);
3377 if (r) 4124 if (r)
3378 goto err0; 4125 goto err0;
3379 4126
@@ -3381,8 +4128,10 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
3381 if (r) 4128 if (r)
3382 goto err1; 4129 goto err1;
3383 4130
3384 dss_select_dispc_clk_source(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC); 4131 dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
3385 dss_select_dsi_clk_source(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI); 4132 dss_select_dsi_clk_source(dsi_module, dssdev->clocks.dsi.dsi_fclk_src);
4133 dss_select_lcd_clk_source(dssdev->manager->id,
4134 dssdev->clocks.dispc.channel.lcd_clk_src);
3386 4135
3387 DSSDBG("PLL OK\n"); 4136 DSSDBG("PLL OK\n");
3388 4137
@@ -3390,82 +4139,92 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
3390 if (r) 4139 if (r)
3391 goto err2; 4140 goto err2;
3392 4141
3393 r = dsi_complexio_init(dssdev); 4142 r = dsi_cio_init(dssdev);
3394 if (r) 4143 if (r)
3395 goto err2; 4144 goto err2;
3396 4145
3397 _dsi_print_reset_status(); 4146 _dsi_print_reset_status(dsidev);
3398 4147
3399 dsi_proto_timings(dssdev); 4148 dsi_proto_timings(dssdev);
3400 dsi_set_lp_clk_divisor(dssdev); 4149 dsi_set_lp_clk_divisor(dssdev);
3401 4150
3402 if (1) 4151 if (1)
3403 _dsi_print_reset_status(); 4152 _dsi_print_reset_status(dsidev);
3404 4153
3405 r = dsi_proto_config(dssdev); 4154 r = dsi_proto_config(dssdev);
3406 if (r) 4155 if (r)
3407 goto err3; 4156 goto err3;
3408 4157
3409 /* enable interface */ 4158 /* enable interface */
3410 dsi_vc_enable(0, 1); 4159 dsi_vc_enable(dsidev, 0, 1);
3411 dsi_vc_enable(1, 1); 4160 dsi_vc_enable(dsidev, 1, 1);
3412 dsi_vc_enable(2, 1); 4161 dsi_vc_enable(dsidev, 2, 1);
3413 dsi_vc_enable(3, 1); 4162 dsi_vc_enable(dsidev, 3, 1);
3414 dsi_if_enable(1); 4163 dsi_if_enable(dsidev, 1);
3415 dsi_force_tx_stop_mode_io(); 4164 dsi_force_tx_stop_mode_io(dsidev);
3416 4165
3417 return 0; 4166 return 0;
3418err3: 4167err3:
3419 dsi_complexio_uninit(); 4168 dsi_cio_uninit(dsidev);
3420err2: 4169err2:
3421 dss_select_dispc_clk_source(DSS_CLK_SRC_FCK); 4170 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
3422 dss_select_dsi_clk_source(DSS_CLK_SRC_FCK); 4171 dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
3423err1: 4172err1:
3424 dsi_pll_uninit(); 4173 dsi_pll_uninit(dsidev, true);
3425err0: 4174err0:
3426 return r; 4175 return r;
3427} 4176}
3428 4177
3429static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev) 4178static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
4179 bool disconnect_lanes, bool enter_ulps)
3430{ 4180{
3431 /* disable interface */ 4181 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3432 dsi_if_enable(0); 4182 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3433 dsi_vc_enable(0, 0); 4183 int dsi_module = dsi_get_dsidev_id(dsidev);
3434 dsi_vc_enable(1, 0);
3435 dsi_vc_enable(2, 0);
3436 dsi_vc_enable(3, 0);
3437 4184
3438 dss_select_dispc_clk_source(DSS_CLK_SRC_FCK); 4185 if (enter_ulps && !dsi->ulps_enabled)
3439 dss_select_dsi_clk_source(DSS_CLK_SRC_FCK); 4186 dsi_enter_ulps(dsidev);
3440 dsi_complexio_uninit(); 4187
3441 dsi_pll_uninit(); 4188 /* disable interface */
4189 dsi_if_enable(dsidev, 0);
4190 dsi_vc_enable(dsidev, 0, 0);
4191 dsi_vc_enable(dsidev, 1, 0);
4192 dsi_vc_enable(dsidev, 2, 0);
4193 dsi_vc_enable(dsidev, 3, 0);
4194
4195 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
4196 dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
4197 dsi_cio_uninit(dsidev);
4198 dsi_pll_uninit(dsidev, disconnect_lanes);
3442} 4199}
3443 4200
3444static int dsi_core_init(void) 4201static int dsi_core_init(struct platform_device *dsidev)
3445{ 4202{
3446 /* Autoidle */ 4203 /* Autoidle */
3447 REG_FLD_MOD(DSI_SYSCONFIG, 1, 0, 0); 4204 REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 0, 0);
3448 4205
3449 /* ENWAKEUP */ 4206 /* ENWAKEUP */
3450 REG_FLD_MOD(DSI_SYSCONFIG, 1, 2, 2); 4207 REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 2, 2);
3451 4208
3452 /* SIDLEMODE smart-idle */ 4209 /* SIDLEMODE smart-idle */
3453 REG_FLD_MOD(DSI_SYSCONFIG, 2, 4, 3); 4210 REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 2, 4, 3);
3454 4211
3455 _dsi_initialize_irq(); 4212 _dsi_initialize_irq(dsidev);
3456 4213
3457 return 0; 4214 return 0;
3458} 4215}
3459 4216
3460int omapdss_dsi_display_enable(struct omap_dss_device *dssdev) 4217int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
3461{ 4218{
4219 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4220 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3462 int r = 0; 4221 int r = 0;
3463 4222
3464 DSSDBG("dsi_display_enable\n"); 4223 DSSDBG("dsi_display_enable\n");
3465 4224
3466 WARN_ON(!dsi_bus_is_locked()); 4225 WARN_ON(!dsi_bus_is_locked(dsidev));
3467 4226
3468 mutex_lock(&dsi.lock); 4227 mutex_lock(&dsi->lock);
3469 4228
3470 r = omap_dss_start_device(dssdev); 4229 r = omap_dss_start_device(dssdev);
3471 if (r) { 4230 if (r) {
@@ -3474,13 +4233,13 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
3474 } 4233 }
3475 4234
3476 enable_clocks(1); 4235 enable_clocks(1);
3477 dsi_enable_pll_clock(1); 4236 dsi_enable_pll_clock(dsidev, 1);
3478 4237
3479 r = _dsi_reset(); 4238 r = _dsi_reset(dsidev);
3480 if (r) 4239 if (r)
3481 goto err1; 4240 goto err1;
3482 4241
3483 dsi_core_init(); 4242 dsi_core_init(dsidev);
3484 4243
3485 r = dsi_display_init_dispc(dssdev); 4244 r = dsi_display_init_dispc(dssdev);
3486 if (r) 4245 if (r)
@@ -3490,7 +4249,7 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
3490 if (r) 4249 if (r)
3491 goto err2; 4250 goto err2;
3492 4251
3493 mutex_unlock(&dsi.lock); 4252 mutex_unlock(&dsi->lock);
3494 4253
3495 return 0; 4254 return 0;
3496 4255
@@ -3498,39 +4257,46 @@ err2:
3498 dsi_display_uninit_dispc(dssdev); 4257 dsi_display_uninit_dispc(dssdev);
3499err1: 4258err1:
3500 enable_clocks(0); 4259 enable_clocks(0);
3501 dsi_enable_pll_clock(0); 4260 dsi_enable_pll_clock(dsidev, 0);
3502 omap_dss_stop_device(dssdev); 4261 omap_dss_stop_device(dssdev);
3503err0: 4262err0:
3504 mutex_unlock(&dsi.lock); 4263 mutex_unlock(&dsi->lock);
3505 DSSDBG("dsi_display_enable FAILED\n"); 4264 DSSDBG("dsi_display_enable FAILED\n");
3506 return r; 4265 return r;
3507} 4266}
3508EXPORT_SYMBOL(omapdss_dsi_display_enable); 4267EXPORT_SYMBOL(omapdss_dsi_display_enable);
3509 4268
3510void omapdss_dsi_display_disable(struct omap_dss_device *dssdev) 4269void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
4270 bool disconnect_lanes, bool enter_ulps)
3511{ 4271{
4272 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4273 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4274
3512 DSSDBG("dsi_display_disable\n"); 4275 DSSDBG("dsi_display_disable\n");
3513 4276
3514 WARN_ON(!dsi_bus_is_locked()); 4277 WARN_ON(!dsi_bus_is_locked(dsidev));
3515 4278
3516 mutex_lock(&dsi.lock); 4279 mutex_lock(&dsi->lock);
3517 4280
3518 dsi_display_uninit_dispc(dssdev); 4281 dsi_display_uninit_dispc(dssdev);
3519 4282
3520 dsi_display_uninit_dsi(dssdev); 4283 dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps);
3521 4284
3522 enable_clocks(0); 4285 enable_clocks(0);
3523 dsi_enable_pll_clock(0); 4286 dsi_enable_pll_clock(dsidev, 0);
3524 4287
3525 omap_dss_stop_device(dssdev); 4288 omap_dss_stop_device(dssdev);
3526 4289
3527 mutex_unlock(&dsi.lock); 4290 mutex_unlock(&dsi->lock);
3528} 4291}
3529EXPORT_SYMBOL(omapdss_dsi_display_disable); 4292EXPORT_SYMBOL(omapdss_dsi_display_disable);
3530 4293
3531int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable) 4294int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
3532{ 4295{
3533 dsi.te_enabled = enable; 4296 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4297 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4298
4299 dsi->te_enabled = enable;
3534 return 0; 4300 return 0;
3535} 4301}
3536EXPORT_SYMBOL(omapdss_dsi_enable_te); 4302EXPORT_SYMBOL(omapdss_dsi_enable_te);
@@ -3550,23 +4316,33 @@ void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
3550 4316
3551int dsi_init_display(struct omap_dss_device *dssdev) 4317int dsi_init_display(struct omap_dss_device *dssdev)
3552{ 4318{
4319 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4320 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4321 int dsi_module = dsi_get_dsidev_id(dsidev);
4322
3553 DSSDBG("DSI init\n"); 4323 DSSDBG("DSI init\n");
3554 4324
3555 /* XXX these should be figured out dynamically */ 4325 /* XXX these should be figured out dynamically */
3556 dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE | 4326 dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
3557 OMAP_DSS_DISPLAY_CAP_TEAR_ELIM; 4327 OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
3558 4328
3559 if (dsi.vdds_dsi_reg == NULL) { 4329 if (dsi->vdds_dsi_reg == NULL) {
3560 struct regulator *vdds_dsi; 4330 struct regulator *vdds_dsi;
3561 4331
3562 vdds_dsi = regulator_get(&dsi.pdev->dev, "vdds_dsi"); 4332 vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");
3563 4333
3564 if (IS_ERR(vdds_dsi)) { 4334 if (IS_ERR(vdds_dsi)) {
3565 DSSERR("can't get VDDS_DSI regulator\n"); 4335 DSSERR("can't get VDDS_DSI regulator\n");
3566 return PTR_ERR(vdds_dsi); 4336 return PTR_ERR(vdds_dsi);
3567 } 4337 }
3568 4338
3569 dsi.vdds_dsi_reg = vdds_dsi; 4339 dsi->vdds_dsi_reg = vdds_dsi;
4340 }
4341
4342 if (dsi_get_num_data_lanes_dssdev(dssdev) > dsi->num_data_lanes) {
4343 DSSERR("DSI%d can't support more than %d data lanes\n",
4344 dsi_module + 1, dsi->num_data_lanes);
4345 return -EINVAL;
3570 } 4346 }
3571 4347
3572 return 0; 4348 return 0;
@@ -3574,11 +4350,13 @@ int dsi_init_display(struct omap_dss_device *dssdev)
3574 4350
3575int omap_dsi_request_vc(struct omap_dss_device *dssdev, int *channel) 4351int omap_dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
3576{ 4352{
4353 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4354 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3577 int i; 4355 int i;
3578 4356
3579 for (i = 0; i < ARRAY_SIZE(dsi.vc); i++) { 4357 for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
3580 if (!dsi.vc[i].dssdev) { 4358 if (!dsi->vc[i].dssdev) {
3581 dsi.vc[i].dssdev = dssdev; 4359 dsi->vc[i].dssdev = dssdev;
3582 *channel = i; 4360 *channel = i;
3583 return 0; 4361 return 0;
3584 } 4362 }
@@ -3591,6 +4369,9 @@ EXPORT_SYMBOL(omap_dsi_request_vc);
3591 4369
3592int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id) 4370int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
3593{ 4371{
4372 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4373 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4374
3594 if (vc_id < 0 || vc_id > 3) { 4375 if (vc_id < 0 || vc_id > 3) {
3595 DSSERR("VC ID out of range\n"); 4376 DSSERR("VC ID out of range\n");
3596 return -EINVAL; 4377 return -EINVAL;
@@ -3601,13 +4382,13 @@ int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
3601 return -EINVAL; 4382 return -EINVAL;
3602 } 4383 }
3603 4384
3604 if (dsi.vc[channel].dssdev != dssdev) { 4385 if (dsi->vc[channel].dssdev != dssdev) {
3605 DSSERR("Virtual Channel not allocated to display %s\n", 4386 DSSERR("Virtual Channel not allocated to display %s\n",
3606 dssdev->name); 4387 dssdev->name);
3607 return -EINVAL; 4388 return -EINVAL;
3608 } 4389 }
3609 4390
3610 dsi.vc[channel].vc_id = vc_id; 4391 dsi->vc[channel].vc_id = vc_id;
3611 4392
3612 return 0; 4393 return 0;
3613} 4394}
@@ -3615,143 +4396,172 @@ EXPORT_SYMBOL(omap_dsi_set_vc_id);
3615 4396
3616void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel) 4397void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel)
3617{ 4398{
4399 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4400 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4401
3618 if ((channel >= 0 && channel <= 3) && 4402 if ((channel >= 0 && channel <= 3) &&
3619 dsi.vc[channel].dssdev == dssdev) { 4403 dsi->vc[channel].dssdev == dssdev) {
3620 dsi.vc[channel].dssdev = NULL; 4404 dsi->vc[channel].dssdev = NULL;
3621 dsi.vc[channel].vc_id = 0; 4405 dsi->vc[channel].vc_id = 0;
3622 } 4406 }
3623} 4407}
3624EXPORT_SYMBOL(omap_dsi_release_vc); 4408EXPORT_SYMBOL(omap_dsi_release_vc);
3625 4409
3626void dsi_wait_pll_hsdiv_dispc_active(void) 4410void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev)
3627{ 4411{
3628 if (wait_for_bit_change(DSI_PLL_STATUS, 7, 1) != 1) 4412 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 7, 1) != 1)
3629 DSSERR("%s (%s) not active\n", 4413 DSSERR("%s (%s) not active\n",
3630 dss_get_generic_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC), 4414 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
3631 dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC)); 4415 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC));
3632} 4416}
3633 4417
3634void dsi_wait_pll_hsdiv_dsi_active(void) 4418void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev)
3635{ 4419{
3636 if (wait_for_bit_change(DSI_PLL_STATUS, 8, 1) != 1) 4420 if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 8, 1) != 1)
3637 DSSERR("%s (%s) not active\n", 4421 DSSERR("%s (%s) not active\n",
3638 dss_get_generic_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI), 4422 dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
3639 dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI)); 4423 dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI));
3640} 4424}
3641 4425
3642static void dsi_calc_clock_param_ranges(void) 4426static void dsi_calc_clock_param_ranges(struct platform_device *dsidev)
3643{ 4427{
3644 dsi.regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN); 4428 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3645 dsi.regm_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM); 4429
3646 dsi.regm_dispc_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DISPC); 4430 dsi->regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN);
3647 dsi.regm_dsi_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DSI); 4431 dsi->regm_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM);
3648 dsi.fint_min = dss_feat_get_param_min(FEAT_PARAM_DSIPLL_FINT); 4432 dsi->regm_dispc_max =
3649 dsi.fint_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_FINT); 4433 dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DISPC);
3650 dsi.lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV); 4434 dsi->regm_dsi_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DSI);
4435 dsi->fint_min = dss_feat_get_param_min(FEAT_PARAM_DSIPLL_FINT);
4436 dsi->fint_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_FINT);
4437 dsi->lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
3651} 4438}
3652 4439
3653static int dsi_init(struct platform_device *pdev) 4440static int dsi_init(struct platform_device *dsidev)
3654{ 4441{
4442 struct omap_display_platform_data *dss_plat_data;
4443 struct omap_dss_board_info *board_info;
3655 u32 rev; 4444 u32 rev;
3656 int r, i; 4445 int r, i, dsi_module = dsi_get_dsidev_id(dsidev);
3657 struct resource *dsi_mem; 4446 struct resource *dsi_mem;
4447 struct dsi_data *dsi;
4448
4449 dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
4450 if (!dsi) {
4451 r = -ENOMEM;
4452 goto err0;
4453 }
4454
4455 dsi->pdev = dsidev;
4456 dsi_pdev_map[dsi_module] = dsidev;
4457 dev_set_drvdata(&dsidev->dev, dsi);
4458
4459 dss_plat_data = dsidev->dev.platform_data;
4460 board_info = dss_plat_data->board_data;
4461 dsi->dsi_mux_pads = board_info->dsi_mux_pads;
3658 4462
3659 spin_lock_init(&dsi.irq_lock); 4463 spin_lock_init(&dsi->irq_lock);
3660 spin_lock_init(&dsi.errors_lock); 4464 spin_lock_init(&dsi->errors_lock);
3661 dsi.errors = 0; 4465 dsi->errors = 0;
3662 4466
3663#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 4467#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
3664 spin_lock_init(&dsi.irq_stats_lock); 4468 spin_lock_init(&dsi->irq_stats_lock);
3665 dsi.irq_stats.last_reset = jiffies; 4469 dsi->irq_stats.last_reset = jiffies;
3666#endif 4470#endif
3667 4471
3668 mutex_init(&dsi.lock); 4472 mutex_init(&dsi->lock);
3669 sema_init(&dsi.bus_lock, 1); 4473 sema_init(&dsi->bus_lock, 1);
3670 4474
3671 dsi.workqueue = create_singlethread_workqueue("dsi"); 4475 INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work,
3672 if (dsi.workqueue == NULL)
3673 return -ENOMEM;
3674
3675 INIT_DELAYED_WORK_DEFERRABLE(&dsi.framedone_timeout_work,
3676 dsi_framedone_timeout_work_callback); 4476 dsi_framedone_timeout_work_callback);
3677 4477
3678#ifdef DSI_CATCH_MISSING_TE 4478#ifdef DSI_CATCH_MISSING_TE
3679 init_timer(&dsi.te_timer); 4479 init_timer(&dsi->te_timer);
3680 dsi.te_timer.function = dsi_te_timeout; 4480 dsi->te_timer.function = dsi_te_timeout;
3681 dsi.te_timer.data = 0; 4481 dsi->te_timer.data = 0;
3682#endif 4482#endif
3683 dsi_mem = platform_get_resource(dsi.pdev, IORESOURCE_MEM, 0); 4483 dsi_mem = platform_get_resource(dsi->pdev, IORESOURCE_MEM, 0);
3684 if (!dsi_mem) { 4484 if (!dsi_mem) {
3685 DSSERR("can't get IORESOURCE_MEM DSI\n"); 4485 DSSERR("can't get IORESOURCE_MEM DSI\n");
3686 r = -EINVAL; 4486 r = -EINVAL;
3687 goto err1; 4487 goto err1;
3688 } 4488 }
3689 dsi.base = ioremap(dsi_mem->start, resource_size(dsi_mem)); 4489 dsi->base = ioremap(dsi_mem->start, resource_size(dsi_mem));
3690 if (!dsi.base) { 4490 if (!dsi->base) {
3691 DSSERR("can't ioremap DSI\n"); 4491 DSSERR("can't ioremap DSI\n");
3692 r = -ENOMEM; 4492 r = -ENOMEM;
3693 goto err1; 4493 goto err1;
3694 } 4494 }
3695 dsi.irq = platform_get_irq(dsi.pdev, 0); 4495 dsi->irq = platform_get_irq(dsi->pdev, 0);
3696 if (dsi.irq < 0) { 4496 if (dsi->irq < 0) {
3697 DSSERR("platform_get_irq failed\n"); 4497 DSSERR("platform_get_irq failed\n");
3698 r = -ENODEV; 4498 r = -ENODEV;
3699 goto err2; 4499 goto err2;
3700 } 4500 }
3701 4501
3702 r = request_irq(dsi.irq, omap_dsi_irq_handler, IRQF_SHARED, 4502 r = request_irq(dsi->irq, omap_dsi_irq_handler, IRQF_SHARED,
3703 "OMAP DSI1", dsi.pdev); 4503 dev_name(&dsidev->dev), dsi->pdev);
3704 if (r < 0) { 4504 if (r < 0) {
3705 DSSERR("request_irq failed\n"); 4505 DSSERR("request_irq failed\n");
3706 goto err2; 4506 goto err2;
3707 } 4507 }
3708 4508
3709 /* DSI VCs initialization */ 4509 /* DSI VCs initialization */
3710 for (i = 0; i < ARRAY_SIZE(dsi.vc); i++) { 4510 for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
3711 dsi.vc[i].mode = DSI_VC_MODE_L4; 4511 dsi->vc[i].mode = DSI_VC_MODE_L4;
3712 dsi.vc[i].dssdev = NULL; 4512 dsi->vc[i].dssdev = NULL;
3713 dsi.vc[i].vc_id = 0; 4513 dsi->vc[i].vc_id = 0;
3714 } 4514 }
3715 4515
3716 dsi_calc_clock_param_ranges(); 4516 dsi_calc_clock_param_ranges(dsidev);
3717 4517
3718 enable_clocks(1); 4518 enable_clocks(1);
3719 4519
3720 rev = dsi_read_reg(DSI_REVISION); 4520 rev = dsi_read_reg(dsidev, DSI_REVISION);
3721 dev_dbg(&pdev->dev, "OMAP DSI rev %d.%d\n", 4521 dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
3722 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); 4522 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
3723 4523
4524 dsi->num_data_lanes = dsi_get_num_data_lanes(dsidev);
4525
3724 enable_clocks(0); 4526 enable_clocks(0);
3725 4527
3726 return 0; 4528 return 0;
3727err2: 4529err2:
3728 iounmap(dsi.base); 4530 iounmap(dsi->base);
3729err1: 4531err1:
3730 destroy_workqueue(dsi.workqueue); 4532 kfree(dsi);
4533err0:
3731 return r; 4534 return r;
3732} 4535}
3733 4536
3734static void dsi_exit(void) 4537static void dsi_exit(struct platform_device *dsidev)
3735{ 4538{
3736 if (dsi.vdds_dsi_reg != NULL) { 4539 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3737 regulator_put(dsi.vdds_dsi_reg); 4540
3738 dsi.vdds_dsi_reg = NULL; 4541 if (dsi->vdds_dsi_reg != NULL) {
4542 if (dsi->vdds_dsi_enabled) {
4543 regulator_disable(dsi->vdds_dsi_reg);
4544 dsi->vdds_dsi_enabled = false;
4545 }
4546
4547 regulator_put(dsi->vdds_dsi_reg);
4548 dsi->vdds_dsi_reg = NULL;
3739 } 4549 }
3740 4550
3741 free_irq(dsi.irq, dsi.pdev); 4551 free_irq(dsi->irq, dsi->pdev);
3742 iounmap(dsi.base); 4552 iounmap(dsi->base);
3743 4553
3744 destroy_workqueue(dsi.workqueue); 4554 kfree(dsi);
3745 4555
3746 DSSDBG("omap_dsi_exit\n"); 4556 DSSDBG("omap_dsi_exit\n");
3747} 4557}
3748 4558
3749/* DSI1 HW IP initialisation */ 4559/* DSI1 HW IP initialisation */
3750static int omap_dsi1hw_probe(struct platform_device *pdev) 4560static int omap_dsi1hw_probe(struct platform_device *dsidev)
3751{ 4561{
3752 int r; 4562 int r;
3753 dsi.pdev = pdev; 4563
3754 r = dsi_init(pdev); 4564 r = dsi_init(dsidev);
3755 if (r) { 4565 if (r) {
3756 DSSERR("Failed to initialize DSI\n"); 4566 DSSERR("Failed to initialize DSI\n");
3757 goto err_dsi; 4567 goto err_dsi;
@@ -3760,9 +4570,12 @@ err_dsi:
3760 return r; 4570 return r;
3761} 4571}
3762 4572
3763static int omap_dsi1hw_remove(struct platform_device *pdev) 4573static int omap_dsi1hw_remove(struct platform_device *dsidev)
3764{ 4574{
3765 dsi_exit(); 4575 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4576
4577 dsi_exit(dsidev);
4578 WARN_ON(dsi->scp_clk_refcount > 0);
3766 return 0; 4579 return 0;
3767} 4580}
3768 4581
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 3f1fee63c678..d9489d5c4f08 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -29,7 +29,7 @@
29#include <linux/seq_file.h> 29#include <linux/seq_file.h>
30#include <linux/clk.h> 30#include <linux/clk.h>
31 31
32#include <plat/display.h> 32#include <video/omapdss.h>
33#include <plat/clock.h> 33#include <plat/clock.h>
34#include "dss.h" 34#include "dss.h"
35#include "dss_features.h" 35#include "dss_features.h"
@@ -45,7 +45,6 @@ struct dss_reg {
45#define DSS_REVISION DSS_REG(0x0000) 45#define DSS_REVISION DSS_REG(0x0000)
46#define DSS_SYSCONFIG DSS_REG(0x0010) 46#define DSS_SYSCONFIG DSS_REG(0x0010)
47#define DSS_SYSSTATUS DSS_REG(0x0014) 47#define DSS_SYSSTATUS DSS_REG(0x0014)
48#define DSS_IRQSTATUS DSS_REG(0x0018)
49#define DSS_CONTROL DSS_REG(0x0040) 48#define DSS_CONTROL DSS_REG(0x0040)
50#define DSS_SDI_CONTROL DSS_REG(0x0044) 49#define DSS_SDI_CONTROL DSS_REG(0x0044)
51#define DSS_PLL_CONTROL DSS_REG(0x0048) 50#define DSS_PLL_CONTROL DSS_REG(0x0048)
@@ -75,17 +74,17 @@ static struct {
75 struct dss_clock_info cache_dss_cinfo; 74 struct dss_clock_info cache_dss_cinfo;
76 struct dispc_clock_info cache_dispc_cinfo; 75 struct dispc_clock_info cache_dispc_cinfo;
77 76
78 enum dss_clk_source dsi_clk_source; 77 enum omap_dss_clk_source dsi_clk_source[MAX_NUM_DSI];
79 enum dss_clk_source dispc_clk_source; 78 enum omap_dss_clk_source dispc_clk_source;
80 enum dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS]; 79 enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
81 80
82 u32 ctx[DSS_SZ_REGS / sizeof(u32)]; 81 u32 ctx[DSS_SZ_REGS / sizeof(u32)];
83} dss; 82} dss;
84 83
85static const char * const dss_generic_clk_source_names[] = { 84static const char * const dss_generic_clk_source_names[] = {
86 [DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI_PLL_HSDIV_DISPC", 85 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI_PLL_HSDIV_DISPC",
87 [DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI_PLL_HSDIV_DSI", 86 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI_PLL_HSDIV_DSI",
88 [DSS_CLK_SRC_FCK] = "DSS_FCK", 87 [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK",
89}; 88};
90 89
91static void dss_clk_enable_all_no_ctx(void); 90static void dss_clk_enable_all_no_ctx(void);
@@ -230,7 +229,7 @@ void dss_sdi_disable(void)
230 REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */ 229 REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
231} 230}
232 231
233const char *dss_get_generic_clk_source_name(enum dss_clk_source clk_src) 232const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src)
234{ 233{
235 return dss_generic_clk_source_names[clk_src]; 234 return dss_generic_clk_source_names[clk_src];
236} 235}
@@ -246,8 +245,8 @@ void dss_dump_clocks(struct seq_file *s)
246 245
247 seq_printf(s, "- DSS -\n"); 246 seq_printf(s, "- DSS -\n");
248 247
249 fclk_name = dss_get_generic_clk_source_name(DSS_CLK_SRC_FCK); 248 fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
250 fclk_real_name = dss_feat_get_clk_source_name(DSS_CLK_SRC_FCK); 249 fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
251 fclk_rate = dss_clk_get_rate(DSS_CLK_FCK); 250 fclk_rate = dss_clk_get_rate(DSS_CLK_FCK);
252 251
253 if (dss.dpll4_m4_ck) { 252 if (dss.dpll4_m4_ck) {
@@ -286,7 +285,6 @@ void dss_dump_regs(struct seq_file *s)
286 DUMPREG(DSS_REVISION); 285 DUMPREG(DSS_REVISION);
287 DUMPREG(DSS_SYSCONFIG); 286 DUMPREG(DSS_SYSCONFIG);
288 DUMPREG(DSS_SYSSTATUS); 287 DUMPREG(DSS_SYSSTATUS);
289 DUMPREG(DSS_IRQSTATUS);
290 DUMPREG(DSS_CONTROL); 288 DUMPREG(DSS_CONTROL);
291 289
292 if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) & 290 if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
@@ -300,18 +298,25 @@ void dss_dump_regs(struct seq_file *s)
300#undef DUMPREG 298#undef DUMPREG
301} 299}
302 300
303void dss_select_dispc_clk_source(enum dss_clk_source clk_src) 301void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
304{ 302{
303 struct platform_device *dsidev;
305 int b; 304 int b;
306 u8 start, end; 305 u8 start, end;
307 306
308 switch (clk_src) { 307 switch (clk_src) {
309 case DSS_CLK_SRC_FCK: 308 case OMAP_DSS_CLK_SRC_FCK:
310 b = 0; 309 b = 0;
311 break; 310 break;
312 case DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: 311 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
313 b = 1; 312 b = 1;
314 dsi_wait_pll_hsdiv_dispc_active(); 313 dsidev = dsi_get_dsidev_from_id(0);
314 dsi_wait_pll_hsdiv_dispc_active(dsidev);
315 break;
316 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
317 b = 2;
318 dsidev = dsi_get_dsidev_from_id(1);
319 dsi_wait_pll_hsdiv_dispc_active(dsidev);
315 break; 320 break;
316 default: 321 default:
317 BUG(); 322 BUG();
@@ -324,17 +329,27 @@ void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
324 dss.dispc_clk_source = clk_src; 329 dss.dispc_clk_source = clk_src;
325} 330}
326 331
327void dss_select_dsi_clk_source(enum dss_clk_source clk_src) 332void dss_select_dsi_clk_source(int dsi_module,
333 enum omap_dss_clk_source clk_src)
328{ 334{
335 struct platform_device *dsidev;
329 int b; 336 int b;
330 337
331 switch (clk_src) { 338 switch (clk_src) {
332 case DSS_CLK_SRC_FCK: 339 case OMAP_DSS_CLK_SRC_FCK:
333 b = 0; 340 b = 0;
334 break; 341 break;
335 case DSS_CLK_SRC_DSI_PLL_HSDIV_DSI: 342 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI:
343 BUG_ON(dsi_module != 0);
344 b = 1;
345 dsidev = dsi_get_dsidev_from_id(0);
346 dsi_wait_pll_hsdiv_dsi_active(dsidev);
347 break;
348 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI:
349 BUG_ON(dsi_module != 1);
336 b = 1; 350 b = 1;
337 dsi_wait_pll_hsdiv_dsi_active(); 351 dsidev = dsi_get_dsidev_from_id(1);
352 dsi_wait_pll_hsdiv_dsi_active(dsidev);
338 break; 353 break;
339 default: 354 default:
340 BUG(); 355 BUG();
@@ -342,25 +357,33 @@ void dss_select_dsi_clk_source(enum dss_clk_source clk_src)
342 357
343 REG_FLD_MOD(DSS_CONTROL, b, 1, 1); /* DSI_CLK_SWITCH */ 358 REG_FLD_MOD(DSS_CONTROL, b, 1, 1); /* DSI_CLK_SWITCH */
344 359
345 dss.dsi_clk_source = clk_src; 360 dss.dsi_clk_source[dsi_module] = clk_src;
346} 361}
347 362
348void dss_select_lcd_clk_source(enum omap_channel channel, 363void dss_select_lcd_clk_source(enum omap_channel channel,
349 enum dss_clk_source clk_src) 364 enum omap_dss_clk_source clk_src)
350{ 365{
366 struct platform_device *dsidev;
351 int b, ix, pos; 367 int b, ix, pos;
352 368
353 if (!dss_has_feature(FEAT_LCD_CLK_SRC)) 369 if (!dss_has_feature(FEAT_LCD_CLK_SRC))
354 return; 370 return;
355 371
356 switch (clk_src) { 372 switch (clk_src) {
357 case DSS_CLK_SRC_FCK: 373 case OMAP_DSS_CLK_SRC_FCK:
358 b = 0; 374 b = 0;
359 break; 375 break;
360 case DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: 376 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
361 BUG_ON(channel != OMAP_DSS_CHANNEL_LCD); 377 BUG_ON(channel != OMAP_DSS_CHANNEL_LCD);
362 b = 1; 378 b = 1;
363 dsi_wait_pll_hsdiv_dispc_active(); 379 dsidev = dsi_get_dsidev_from_id(0);
380 dsi_wait_pll_hsdiv_dispc_active(dsidev);
381 break;
382 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
383 BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2);
384 b = 1;
385 dsidev = dsi_get_dsidev_from_id(1);
386 dsi_wait_pll_hsdiv_dispc_active(dsidev);
364 break; 387 break;
365 default: 388 default:
366 BUG(); 389 BUG();
@@ -373,20 +396,26 @@ void dss_select_lcd_clk_source(enum omap_channel channel,
373 dss.lcd_clk_source[ix] = clk_src; 396 dss.lcd_clk_source[ix] = clk_src;
374} 397}
375 398
376enum dss_clk_source dss_get_dispc_clk_source(void) 399enum omap_dss_clk_source dss_get_dispc_clk_source(void)
377{ 400{
378 return dss.dispc_clk_source; 401 return dss.dispc_clk_source;
379} 402}
380 403
381enum dss_clk_source dss_get_dsi_clk_source(void) 404enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module)
382{ 405{
383 return dss.dsi_clk_source; 406 return dss.dsi_clk_source[dsi_module];
384} 407}
385 408
386enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel) 409enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
387{ 410{
388 int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 1; 411 if (dss_has_feature(FEAT_LCD_CLK_SRC)) {
389 return dss.lcd_clk_source[ix]; 412 int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 1;
413 return dss.lcd_clk_source[ix];
414 } else {
415 /* LCD_CLK source is the same as DISPC_FCLK source for
416 * OMAP2 and OMAP3 */
417 return dss.dispc_clk_source;
418 }
390} 419}
391 420
392/* calculate clock rates using dividers in cinfo */ 421/* calculate clock rates using dividers in cinfo */
@@ -659,13 +688,18 @@ static int dss_init(void)
659 * the kernel resets it */ 688 * the kernel resets it */
660 omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440); 689 omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440);
661 690
691#ifdef CONFIG_OMAP2_DSS_SLEEP_BEFORE_RESET
662 /* We need to wait here a bit, otherwise we sometimes start to 692 /* We need to wait here a bit, otherwise we sometimes start to
663 * get synclost errors, and after that only power cycle will 693 * get synclost errors, and after that only power cycle will
664 * restore DSS functionality. I have no idea why this happens. 694 * restore DSS functionality. I have no idea why this happens.
665 * And we have to wait _before_ resetting the DSS, but after 695 * And we have to wait _before_ resetting the DSS, but after
666 * enabling clocks. 696 * enabling clocks.
697 *
698 * This bug was at least present on OMAP3430. It's unknown
699 * if it happens on OMAP2 or OMAP3630.
667 */ 700 */
668 msleep(50); 701 msleep(50);
702#endif
669 703
670 _omap_dss_reset(); 704 _omap_dss_reset();
671 705
@@ -700,10 +734,11 @@ static int dss_init(void)
700 734
701 dss.dpll4_m4_ck = dpll4_m4_ck; 735 dss.dpll4_m4_ck = dpll4_m4_ck;
702 736
703 dss.dsi_clk_source = DSS_CLK_SRC_FCK; 737 dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
704 dss.dispc_clk_source = DSS_CLK_SRC_FCK; 738 dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
705 dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK; 739 dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK;
706 dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK; 740 dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
741 dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
707 742
708 dss_save_context(); 743 dss_save_context();
709 744
@@ -1015,6 +1050,14 @@ static void core_dump_clocks(struct seq_file *s)
1015 dss.dss_video_fck 1050 dss.dss_video_fck
1016 }; 1051 };
1017 1052
1053 const char *names[5] = {
1054 "ick",
1055 "fck",
1056 "sys_clk",
1057 "tv_fck",
1058 "video_fck"
1059 };
1060
1018 seq_printf(s, "- CORE -\n"); 1061 seq_printf(s, "- CORE -\n");
1019 1062
1020 seq_printf(s, "internal clk count\t\t%u\n", dss.num_clks_enabled); 1063 seq_printf(s, "internal clk count\t\t%u\n", dss.num_clks_enabled);
@@ -1022,8 +1065,11 @@ static void core_dump_clocks(struct seq_file *s)
1022 for (i = 0; i < 5; i++) { 1065 for (i = 0; i < 5; i++) {
1023 if (!clocks[i]) 1066 if (!clocks[i])
1024 continue; 1067 continue;
1025 seq_printf(s, "%-15s\t%lu\t%d\n", 1068 seq_printf(s, "%s (%s)%*s\t%lu\t%d\n",
1069 names[i],
1026 clocks[i]->name, 1070 clocks[i]->name,
1071 24 - strlen(names[i]) - strlen(clocks[i]->name),
1072 "",
1027 clk_get_rate(clocks[i]), 1073 clk_get_rate(clocks[i]),
1028 clocks[i]->usecount); 1074 clocks[i]->usecount);
1029 } 1075 }
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index c2f582bb19c0..8ab6d43329bb 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -117,15 +117,6 @@ enum dss_clock {
117 DSS_CLK_VIDFCK = 1 << 4, /* DSS_96M_FCLK*/ 117 DSS_CLK_VIDFCK = 1 << 4, /* DSS_96M_FCLK*/
118}; 118};
119 119
120enum dss_clk_source {
121 DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK
122 * OMAP4: PLL1_CLK1 */
123 DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK
124 * OMAP4: PLL1_CLK2 */
125 DSS_CLK_SRC_FCK, /* OMAP2/3: DSS1_ALWON_FCLK
126 * OMAP4: DSS_FCLK */
127};
128
129enum dss_hdmi_venc_clk_source_select { 120enum dss_hdmi_venc_clk_source_select {
130 DSS_VENC_TV_CLK = 0, 121 DSS_VENC_TV_CLK = 0,
131 DSS_HDMI_M_PCLK = 1, 122 DSS_HDMI_M_PCLK = 1,
@@ -236,7 +227,7 @@ void dss_clk_enable(enum dss_clock clks);
236void dss_clk_disable(enum dss_clock clks); 227void dss_clk_disable(enum dss_clock clks);
237unsigned long dss_clk_get_rate(enum dss_clock clk); 228unsigned long dss_clk_get_rate(enum dss_clock clk);
238int dss_need_ctx_restore(void); 229int dss_need_ctx_restore(void);
239const char *dss_get_generic_clk_source_name(enum dss_clk_source clk_src); 230const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
240void dss_dump_clocks(struct seq_file *s); 231void dss_dump_clocks(struct seq_file *s);
241 232
242void dss_dump_regs(struct seq_file *s); 233void dss_dump_regs(struct seq_file *s);
@@ -248,13 +239,14 @@ void dss_sdi_init(u8 datapairs);
248int dss_sdi_enable(void); 239int dss_sdi_enable(void);
249void dss_sdi_disable(void); 240void dss_sdi_disable(void);
250 241
251void dss_select_dispc_clk_source(enum dss_clk_source clk_src); 242void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src);
252void dss_select_dsi_clk_source(enum dss_clk_source clk_src); 243void dss_select_dsi_clk_source(int dsi_module,
244 enum omap_dss_clk_source clk_src);
253void dss_select_lcd_clk_source(enum omap_channel channel, 245void dss_select_lcd_clk_source(enum omap_channel channel,
254 enum dss_clk_source clk_src); 246 enum omap_dss_clk_source clk_src);
255enum dss_clk_source dss_get_dispc_clk_source(void); 247enum omap_dss_clk_source dss_get_dispc_clk_source(void);
256enum dss_clk_source dss_get_dsi_clk_source(void); 248enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module);
257enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel); 249enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel);
258 250
259void dss_set_venc_output(enum omap_dss_venc_type type); 251void dss_set_venc_output(enum omap_dss_venc_type type);
260void dss_set_dac_pwrdn_bgz(bool enable); 252void dss_set_dac_pwrdn_bgz(bool enable);
@@ -284,31 +276,39 @@ static inline void sdi_exit(void)
284 276
285/* DSI */ 277/* DSI */
286#ifdef CONFIG_OMAP2_DSS_DSI 278#ifdef CONFIG_OMAP2_DSS_DSI
279
280struct dentry;
281struct file_operations;
282
287int dsi_init_platform_driver(void); 283int dsi_init_platform_driver(void);
288void dsi_uninit_platform_driver(void); 284void dsi_uninit_platform_driver(void);
289 285
290void dsi_dump_clocks(struct seq_file *s); 286void dsi_dump_clocks(struct seq_file *s);
291void dsi_dump_irqs(struct seq_file *s); 287void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
292void dsi_dump_regs(struct seq_file *s); 288 const struct file_operations *debug_fops);
289void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
290 const struct file_operations *debug_fops);
293 291
294void dsi_save_context(void); 292void dsi_save_context(void);
295void dsi_restore_context(void); 293void dsi_restore_context(void);
296 294
297int dsi_init_display(struct omap_dss_device *display); 295int dsi_init_display(struct omap_dss_device *display);
298void dsi_irq_handler(void); 296void dsi_irq_handler(void);
299unsigned long dsi_get_pll_hsdiv_dispc_rate(void); 297unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev);
300int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo); 298int dsi_pll_set_clock_div(struct platform_device *dsidev,
301int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck, 299 struct dsi_clock_info *cinfo);
302 struct dsi_clock_info *cinfo, 300int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft,
301 unsigned long req_pck, struct dsi_clock_info *cinfo,
303 struct dispc_clock_info *dispc_cinfo); 302 struct dispc_clock_info *dispc_cinfo);
304int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk, 303int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
305 bool enable_hsdiv); 304 bool enable_hsdiv);
306void dsi_pll_uninit(void); 305void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes);
307void dsi_get_overlay_fifo_thresholds(enum omap_plane plane, 306void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
308 u32 fifo_size, enum omap_burst_size *burst_size, 307 u32 fifo_size, enum omap_burst_size *burst_size,
309 u32 *fifo_low, u32 *fifo_high); 308 u32 *fifo_low, u32 *fifo_high);
310void dsi_wait_pll_hsdiv_dispc_active(void); 309void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev);
311void dsi_wait_pll_hsdiv_dsi_active(void); 310void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev);
311struct platform_device *dsi_get_dsidev_from_id(int module);
312#else 312#else
313static inline int dsi_init_platform_driver(void) 313static inline int dsi_init_platform_driver(void)
314{ 314{
@@ -317,17 +317,47 @@ static inline int dsi_init_platform_driver(void)
317static inline void dsi_uninit_platform_driver(void) 317static inline void dsi_uninit_platform_driver(void)
318{ 318{
319} 319}
320static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(void) 320static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
321{ 321{
322 WARN("%s: DSI not compiled in, returning rate as 0\n", __func__); 322 WARN("%s: DSI not compiled in, returning rate as 0\n", __func__);
323 return 0; 323 return 0;
324} 324}
325static inline void dsi_wait_pll_hsdiv_dispc_active(void) 325static inline int dsi_pll_set_clock_div(struct platform_device *dsidev,
326 struct dsi_clock_info *cinfo)
327{
328 WARN("%s: DSI not compiled in\n", __func__);
329 return -ENODEV;
330}
331static inline int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev,
332 bool is_tft, unsigned long req_pck,
333 struct dsi_clock_info *dsi_cinfo,
334 struct dispc_clock_info *dispc_cinfo)
335{
336 WARN("%s: DSI not compiled in\n", __func__);
337 return -ENODEV;
338}
339static inline int dsi_pll_init(struct platform_device *dsidev,
340 bool enable_hsclk, bool enable_hsdiv)
326{ 341{
342 WARN("%s: DSI not compiled in\n", __func__);
343 return -ENODEV;
327} 344}
328static inline void dsi_wait_pll_hsdiv_dsi_active(void) 345static inline void dsi_pll_uninit(struct platform_device *dsidev,
346 bool disconnect_lanes)
329{ 347{
330} 348}
349static inline void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev)
350{
351}
352static inline void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev)
353{
354}
355static inline struct platform_device *dsi_get_dsidev_from_id(int module)
356{
357 WARN("%s: DSI not compiled in, returning platform device as NULL\n",
358 __func__);
359 return NULL;
360}
331#endif 361#endif
332 362
333/* DPI */ 363/* DPI */
@@ -391,7 +421,8 @@ int dispc_setup_plane(enum omap_plane plane,
391 enum omap_dss_rotation_type rotation_type, 421 enum omap_dss_rotation_type rotation_type,
392 u8 rotation, bool mirror, 422 u8 rotation, bool mirror,
393 u8 global_alpha, u8 pre_mult_alpha, 423 u8 global_alpha, u8 pre_mult_alpha,
394 enum omap_channel channel); 424 enum omap_channel channel,
425 u32 puv_addr);
395 426
396bool dispc_go_busy(enum omap_channel channel); 427bool dispc_go_busy(enum omap_channel channel);
397void dispc_go(enum omap_channel channel); 428void dispc_go(enum omap_channel channel);
@@ -485,13 +516,6 @@ void hdmi_panel_exit(void);
485int rfbi_init_platform_driver(void); 516int rfbi_init_platform_driver(void);
486void rfbi_uninit_platform_driver(void); 517void rfbi_uninit_platform_driver(void);
487void rfbi_dump_regs(struct seq_file *s); 518void rfbi_dump_regs(struct seq_file *s);
488
489int rfbi_configure(int rfbi_module, int bpp, int lines);
490void rfbi_enable_rfbi(bool enable);
491void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
492 u16 height, void (callback)(void *data), void *data);
493void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t);
494unsigned long rfbi_get_max_tx_rate(void);
495int rfbi_init_display(struct omap_dss_device *display); 519int rfbi_init_display(struct omap_dss_device *display);
496#else 520#else
497static inline int rfbi_init_platform_driver(void) 521static inline int rfbi_init_platform_driver(void)
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index aa1622241d0d..1c18888e5df3 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -22,7 +22,7 @@
22#include <linux/err.h> 22#include <linux/err.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24 24
25#include <plat/display.h> 25#include <video/omapdss.h>
26#include <plat/cpu.h> 26#include <plat/cpu.h>
27 27
28#include "dss.h" 28#include "dss.h"
@@ -52,7 +52,7 @@ struct omap_dss_features {
52}; 52};
53 53
54/* This struct is assigned to one of the below during initialization */ 54/* This struct is assigned to one of the below during initialization */
55static struct omap_dss_features *omap_current_dss_features; 55static const struct omap_dss_features *omap_current_dss_features;
56 56
57static const struct dss_reg_field omap2_dss_reg_fields[] = { 57static const struct dss_reg_field omap2_dss_reg_fields[] = {
58 [FEAT_REG_FIRHINC] = { 11, 0 }, 58 [FEAT_REG_FIRHINC] = { 11, 0 },
@@ -177,22 +177,55 @@ static const enum omap_color_mode omap3_dss_supported_color_modes[] = {
177 OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32, 177 OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
178}; 178};
179 179
180static const enum omap_color_mode omap4_dss_supported_color_modes[] = {
181 /* OMAP_DSS_GFX */
182 OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
183 OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 |
184 OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
185 OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
186 OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_ARGB32 |
187 OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32 |
188 OMAP_DSS_COLOR_ARGB16_1555,
189
190 /* OMAP_DSS_VIDEO1 */
191 OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB12U |
192 OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_ARGB16_1555 |
193 OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_NV12 |
194 OMAP_DSS_COLOR_RGBA16 | OMAP_DSS_COLOR_RGB24U |
195 OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_UYVY |
196 OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 |
197 OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 |
198 OMAP_DSS_COLOR_RGBX32,
199
200 /* OMAP_DSS_VIDEO2 */
201 OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB12U |
202 OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_ARGB16_1555 |
203 OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_NV12 |
204 OMAP_DSS_COLOR_RGBA16 | OMAP_DSS_COLOR_RGB24U |
205 OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_UYVY |
206 OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 |
207 OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 |
208 OMAP_DSS_COLOR_RGBX32,
209};
210
180static const char * const omap2_dss_clk_source_names[] = { 211static const char * const omap2_dss_clk_source_names[] = {
181 [DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "N/A", 212 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "N/A",
182 [DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "N/A", 213 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "N/A",
183 [DSS_CLK_SRC_FCK] = "DSS_FCLK1", 214 [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK1",
184}; 215};
185 216
186static const char * const omap3_dss_clk_source_names[] = { 217static const char * const omap3_dss_clk_source_names[] = {
187 [DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI1_PLL_FCLK", 218 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI1_PLL_FCLK",
188 [DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI2_PLL_FCLK", 219 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI2_PLL_FCLK",
189 [DSS_CLK_SRC_FCK] = "DSS1_ALWON_FCLK", 220 [OMAP_DSS_CLK_SRC_FCK] = "DSS1_ALWON_FCLK",
190}; 221};
191 222
192static const char * const omap4_dss_clk_source_names[] = { 223static const char * const omap4_dss_clk_source_names[] = {
193 [DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "PLL1_CLK1", 224 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "PLL1_CLK1",
194 [DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "PLL1_CLK2", 225 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "PLL1_CLK2",
195 [DSS_CLK_SRC_FCK] = "DSS_FCLK", 226 [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK",
227 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "PLL2_CLK1",
228 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "PLL2_CLK2",
196}; 229};
197 230
198static const struct dss_param_range omap2_dss_param_range[] = { 231static const struct dss_param_range omap2_dss_param_range[] = {
@@ -226,7 +259,7 @@ static const struct dss_param_range omap4_dss_param_range[] = {
226}; 259};
227 260
228/* OMAP2 DSS Features */ 261/* OMAP2 DSS Features */
229static struct omap_dss_features omap2_dss_features = { 262static const struct omap_dss_features omap2_dss_features = {
230 .reg_fields = omap2_dss_reg_fields, 263 .reg_fields = omap2_dss_reg_fields,
231 .num_reg_fields = ARRAY_SIZE(omap2_dss_reg_fields), 264 .num_reg_fields = ARRAY_SIZE(omap2_dss_reg_fields),
232 265
@@ -244,7 +277,7 @@ static struct omap_dss_features omap2_dss_features = {
244}; 277};
245 278
246/* OMAP3 DSS Features */ 279/* OMAP3 DSS Features */
247static struct omap_dss_features omap3430_dss_features = { 280static const struct omap_dss_features omap3430_dss_features = {
248 .reg_fields = omap3_dss_reg_fields, 281 .reg_fields = omap3_dss_reg_fields,
249 .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields), 282 .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
250 283
@@ -252,7 +285,8 @@ static struct omap_dss_features omap3430_dss_features = {
252 FEAT_GLOBAL_ALPHA | FEAT_LCDENABLEPOL | 285 FEAT_GLOBAL_ALPHA | FEAT_LCDENABLEPOL |
253 FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE | 286 FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
254 FEAT_FUNCGATED | FEAT_ROWREPEATENABLE | 287 FEAT_FUNCGATED | FEAT_ROWREPEATENABLE |
255 FEAT_LINEBUFFERSPLIT | FEAT_RESIZECONF, 288 FEAT_LINEBUFFERSPLIT | FEAT_RESIZECONF |
289 FEAT_DSI_PLL_FREQSEL | FEAT_DSI_REVERSE_TXCLKESC,
256 290
257 .num_mgrs = 2, 291 .num_mgrs = 2,
258 .num_ovls = 3, 292 .num_ovls = 3,
@@ -262,7 +296,7 @@ static struct omap_dss_features omap3430_dss_features = {
262 .dss_params = omap3_dss_param_range, 296 .dss_params = omap3_dss_param_range,
263}; 297};
264 298
265static struct omap_dss_features omap3630_dss_features = { 299static const struct omap_dss_features omap3630_dss_features = {
266 .reg_fields = omap3_dss_reg_fields, 300 .reg_fields = omap3_dss_reg_fields,
267 .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields), 301 .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
268 302
@@ -271,7 +305,8 @@ static struct omap_dss_features omap3630_dss_features = {
271 FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE | 305 FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
272 FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED | 306 FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED |
273 FEAT_ROWREPEATENABLE | FEAT_LINEBUFFERSPLIT | 307 FEAT_ROWREPEATENABLE | FEAT_LINEBUFFERSPLIT |
274 FEAT_RESIZECONF, 308 FEAT_RESIZECONF | FEAT_DSI_PLL_PWR_BUG |
309 FEAT_DSI_PLL_FREQSEL,
275 310
276 .num_mgrs = 2, 311 .num_mgrs = 2,
277 .num_ovls = 3, 312 .num_ovls = 3,
@@ -282,19 +317,43 @@ static struct omap_dss_features omap3630_dss_features = {
282}; 317};
283 318
284/* OMAP4 DSS Features */ 319/* OMAP4 DSS Features */
285static struct omap_dss_features omap4_dss_features = { 320/* For OMAP4430 ES 1.0 revision */
321static const struct omap_dss_features omap4430_es1_0_dss_features = {
286 .reg_fields = omap4_dss_reg_fields, 322 .reg_fields = omap4_dss_reg_fields,
287 .num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields), 323 .num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields),
288 324
289 .has_feature = 325 .has_feature =
290 FEAT_GLOBAL_ALPHA | FEAT_PRE_MULT_ALPHA | 326 FEAT_GLOBAL_ALPHA | FEAT_PRE_MULT_ALPHA |
291 FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 | 327 FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 |
292 FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC, 328 FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC |
329 FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH |
330 FEAT_DSI_GNQ | FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2,
293 331
294 .num_mgrs = 3, 332 .num_mgrs = 3,
295 .num_ovls = 3, 333 .num_ovls = 3,
296 .supported_displays = omap4_dss_supported_displays, 334 .supported_displays = omap4_dss_supported_displays,
297 .supported_color_modes = omap3_dss_supported_color_modes, 335 .supported_color_modes = omap4_dss_supported_color_modes,
336 .clksrc_names = omap4_dss_clk_source_names,
337 .dss_params = omap4_dss_param_range,
338};
339
340/* For all the other OMAP4 versions */
341static const struct omap_dss_features omap4_dss_features = {
342 .reg_fields = omap4_dss_reg_fields,
343 .num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields),
344
345 .has_feature =
346 FEAT_GLOBAL_ALPHA | FEAT_PRE_MULT_ALPHA |
347 FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 |
348 FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC |
349 FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH |
350 FEAT_DSI_GNQ | FEAT_HDMI_CTS_SWMODE |
351 FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2,
352
353 .num_mgrs = 3,
354 .num_ovls = 3,
355 .supported_displays = omap4_dss_supported_displays,
356 .supported_color_modes = omap4_dss_supported_color_modes,
298 .clksrc_names = omap4_dss_clk_source_names, 357 .clksrc_names = omap4_dss_clk_source_names,
299 .dss_params = omap4_dss_param_range, 358 .dss_params = omap4_dss_param_range,
300}; 359};
@@ -337,7 +396,7 @@ bool dss_feat_color_mode_supported(enum omap_plane plane,
337 color_mode; 396 color_mode;
338} 397}
339 398
340const char *dss_feat_get_clk_source_name(enum dss_clk_source id) 399const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id)
341{ 400{
342 return omap_current_dss_features->clksrc_names[id]; 401 return omap_current_dss_features->clksrc_names[id];
343} 402}
@@ -365,6 +424,10 @@ void dss_features_init(void)
365 omap_current_dss_features = &omap3630_dss_features; 424 omap_current_dss_features = &omap3630_dss_features;
366 else if (cpu_is_omap34xx()) 425 else if (cpu_is_omap34xx())
367 omap_current_dss_features = &omap3430_dss_features; 426 omap_current_dss_features = &omap3430_dss_features;
368 else 427 else if (omap_rev() == OMAP4430_REV_ES1_0)
428 omap_current_dss_features = &omap4430_es1_0_dss_features;
429 else if (cpu_is_omap44xx())
369 omap_current_dss_features = &omap4_dss_features; 430 omap_current_dss_features = &omap4_dss_features;
431 else
432 DSSWARN("Unsupported OMAP version");
370} 433}
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index 12e9c4ef0dec..07b346f7d916 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -23,23 +23,34 @@
23#define MAX_DSS_MANAGERS 3 23#define MAX_DSS_MANAGERS 3
24#define MAX_DSS_OVERLAYS 3 24#define MAX_DSS_OVERLAYS 3
25#define MAX_DSS_LCD_MANAGERS 2 25#define MAX_DSS_LCD_MANAGERS 2
26#define MAX_NUM_DSI 2
26 27
27/* DSS has feature id */ 28/* DSS has feature id */
28enum dss_feat_id { 29enum dss_feat_id {
29 FEAT_GLOBAL_ALPHA = 1 << 0, 30 FEAT_GLOBAL_ALPHA = 1 << 0,
30 FEAT_GLOBAL_ALPHA_VID1 = 1 << 1, 31 FEAT_GLOBAL_ALPHA_VID1 = 1 << 1,
31 FEAT_PRE_MULT_ALPHA = 1 << 2, 32 FEAT_PRE_MULT_ALPHA = 1 << 2,
32 FEAT_LCDENABLEPOL = 1 << 3, 33 FEAT_LCDENABLEPOL = 1 << 3,
33 FEAT_LCDENABLESIGNAL = 1 << 4, 34 FEAT_LCDENABLESIGNAL = 1 << 4,
34 FEAT_PCKFREEENABLE = 1 << 5, 35 FEAT_PCKFREEENABLE = 1 << 5,
35 FEAT_FUNCGATED = 1 << 6, 36 FEAT_FUNCGATED = 1 << 6,
36 FEAT_MGR_LCD2 = 1 << 7, 37 FEAT_MGR_LCD2 = 1 << 7,
37 FEAT_LINEBUFFERSPLIT = 1 << 8, 38 FEAT_LINEBUFFERSPLIT = 1 << 8,
38 FEAT_ROWREPEATENABLE = 1 << 9, 39 FEAT_ROWREPEATENABLE = 1 << 9,
39 FEAT_RESIZECONF = 1 << 10, 40 FEAT_RESIZECONF = 1 << 10,
40 /* Independent core clk divider */ 41 /* Independent core clk divider */
41 FEAT_CORE_CLK_DIV = 1 << 11, 42 FEAT_CORE_CLK_DIV = 1 << 11,
42 FEAT_LCD_CLK_SRC = 1 << 12, 43 FEAT_LCD_CLK_SRC = 1 << 12,
44 /* DSI-PLL power command 0x3 is not working */
45 FEAT_DSI_PLL_PWR_BUG = 1 << 13,
46 FEAT_DSI_PLL_FREQSEL = 1 << 14,
47 FEAT_DSI_DCS_CMD_CONFIG_VC = 1 << 15,
48 FEAT_DSI_VC_OCP_WIDTH = 1 << 16,
49 FEAT_DSI_REVERSE_TXCLKESC = 1 << 17,
50 FEAT_DSI_GNQ = 1 << 18,
51 FEAT_HDMI_CTS_SWMODE = 1 << 19,
52 FEAT_HANDLE_UV_SEPARATE = 1 << 20,
53 FEAT_ATTR2 = 1 << 21,
43}; 54};
44 55
45/* DSS register field id */ 56/* DSS register field id */
@@ -77,7 +88,7 @@ enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel
77enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane); 88enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
78bool dss_feat_color_mode_supported(enum omap_plane plane, 89bool dss_feat_color_mode_supported(enum omap_plane plane,
79 enum omap_color_mode color_mode); 90 enum omap_color_mode color_mode);
80const char *dss_feat_get_clk_source_name(enum dss_clk_source id); 91const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id);
81 92
82bool dss_has_feature(enum dss_feat_id id); 93bool dss_has_feature(enum dss_feat_id id);
83void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end); 94void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index a981def8099a..b0555f4f0a78 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -29,10 +29,16 @@
29#include <linux/mutex.h> 29#include <linux/mutex.h>
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/string.h> 31#include <linux/string.h>
32#include <plat/display.h> 32#include <video/omapdss.h>
33#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
34 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
35#include <sound/soc.h>
36#include <sound/pcm_params.h>
37#endif
33 38
34#include "dss.h" 39#include "dss.h"
35#include "hdmi.h" 40#include "hdmi.h"
41#include "dss_features.h"
36 42
37static struct { 43static struct {
38 struct mutex lock; 44 struct mutex lock;
@@ -1052,25 +1058,26 @@ static void update_hdmi_timings(struct hdmi_config *cfg,
1052 cfg->timings.hsync_pol = cea_vesa_timings[code].hsync_pol; 1058 cfg->timings.hsync_pol = cea_vesa_timings[code].hsync_pol;
1053} 1059}
1054 1060
1055static void hdmi_compute_pll(unsigned long clkin, int phy, 1061static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
1056 int n, struct hdmi_pll_info *pi) 1062 struct hdmi_pll_info *pi)
1057{ 1063{
1058 unsigned long refclk; 1064 unsigned long clkin, refclk;
1059 u32 mf; 1065 u32 mf;
1060 1066
1067 clkin = dss_clk_get_rate(DSS_CLK_SYSCK) / 10000;
1061 /* 1068 /*
1062 * Input clock is predivided by N + 1 1069 * Input clock is predivided by N + 1
1063 * out put of which is reference clk 1070 * out put of which is reference clk
1064 */ 1071 */
1065 refclk = clkin / (n + 1); 1072 pi->regn = dssdev->clocks.hdmi.regn;
1066 pi->regn = n; 1073 refclk = clkin / (pi->regn + 1);
1067 1074
1068 /* 1075 /*
1069 * multiplier is pixel_clk/ref_clk 1076 * multiplier is pixel_clk/ref_clk
1070 * Multiplying by 100 to avoid fractional part removal 1077 * Multiplying by 100 to avoid fractional part removal
1071 */ 1078 */
1072 pi->regm = (phy * 100/(refclk))/100; 1079 pi->regm = (phy * 100 / (refclk)) / 100;
1073 pi->regm2 = 1; 1080 pi->regm2 = dssdev->clocks.hdmi.regm2;
1074 1081
1075 /* 1082 /*
1076 * fractional multiplier is remainder of the difference between 1083 * fractional multiplier is remainder of the difference between
@@ -1078,14 +1085,14 @@ static void hdmi_compute_pll(unsigned long clkin, int phy,
1078 * multiplied by 2^18(262144) divided by the reference clock 1085 * multiplied by 2^18(262144) divided by the reference clock
1079 */ 1086 */
1080 mf = (phy - pi->regm * refclk) * 262144; 1087 mf = (phy - pi->regm * refclk) * 262144;
1081 pi->regmf = mf/(refclk); 1088 pi->regmf = mf / (refclk);
1082 1089
1083 /* 1090 /*
1084 * Dcofreq should be set to 1 if required pixel clock 1091 * Dcofreq should be set to 1 if required pixel clock
1085 * is greater than 1000MHz 1092 * is greater than 1000MHz
1086 */ 1093 */
1087 pi->dcofreq = phy > 1000 * 100; 1094 pi->dcofreq = phy > 1000 * 100;
1088 pi->regsd = ((pi->regm * clkin / 10) / ((n + 1) * 250) + 5) / 10; 1095 pi->regsd = ((pi->regm * clkin / 10) / ((pi->regn + 1) * 250) + 5) / 10;
1089 1096
1090 DSSDBG("M = %d Mf = %d\n", pi->regm, pi->regmf); 1097 DSSDBG("M = %d Mf = %d\n", pi->regm, pi->regmf);
1091 DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd); 1098 DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd);
@@ -1106,7 +1113,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
1106 int r, code = 0; 1113 int r, code = 0;
1107 struct hdmi_pll_info pll_data; 1114 struct hdmi_pll_info pll_data;
1108 struct omap_video_timings *p; 1115 struct omap_video_timings *p;
1109 int clkin, n, phy; 1116 unsigned long phy;
1110 1117
1111 hdmi_enable_clocks(1); 1118 hdmi_enable_clocks(1);
1112 1119
@@ -1126,11 +1133,9 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
1126 dssdev->panel.timings = cea_vesa_timings[code].timings; 1133 dssdev->panel.timings = cea_vesa_timings[code].timings;
1127 update_hdmi_timings(&hdmi.cfg, p, code); 1134 update_hdmi_timings(&hdmi.cfg, p, code);
1128 1135
1129 clkin = 3840; /* 38.4 MHz */
1130 n = 15; /* this is a constant for our math */
1131 phy = p->pixel_clock; 1136 phy = p->pixel_clock;
1132 1137
1133 hdmi_compute_pll(clkin, phy, n, &pll_data); 1138 hdmi_compute_pll(dssdev, phy, &pll_data);
1134 1139
1135 hdmi_wp_video_start(0); 1140 hdmi_wp_video_start(0);
1136 1141
@@ -1160,7 +1165,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
1160 * dynamically by user. This can be moved to single location , say 1165 * dynamically by user. This can be moved to single location , say
1161 * Boardfile. 1166 * Boardfile.
1162 */ 1167 */
1163 dss_select_dispc_clk_source(DSS_CLK_SRC_FCK); 1168 dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
1164 1169
1165 /* bypass TV gamma table */ 1170 /* bypass TV gamma table */
1166 dispc_enable_gamma_table(0); 1171 dispc_enable_gamma_table(0);
@@ -1275,10 +1280,420 @@ void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev)
1275 mutex_unlock(&hdmi.lock); 1280 mutex_unlock(&hdmi.lock);
1276} 1281}
1277 1282
1283#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
1284 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
1285static void hdmi_wp_audio_config_format(
1286 struct hdmi_audio_format *aud_fmt)
1287{
1288 u32 r;
1289
1290 DSSDBG("Enter hdmi_wp_audio_config_format\n");
1291
1292 r = hdmi_read_reg(HDMI_WP_AUDIO_CFG);
1293 r = FLD_MOD(r, aud_fmt->stereo_channels, 26, 24);
1294 r = FLD_MOD(r, aud_fmt->active_chnnls_msk, 23, 16);
1295 r = FLD_MOD(r, aud_fmt->en_sig_blk_strt_end, 5, 5);
1296 r = FLD_MOD(r, aud_fmt->type, 4, 4);
1297 r = FLD_MOD(r, aud_fmt->justification, 3, 3);
1298 r = FLD_MOD(r, aud_fmt->sample_order, 2, 2);
1299 r = FLD_MOD(r, aud_fmt->samples_per_word, 1, 1);
1300 r = FLD_MOD(r, aud_fmt->sample_size, 0, 0);
1301 hdmi_write_reg(HDMI_WP_AUDIO_CFG, r);
1302}
1303
1304static void hdmi_wp_audio_config_dma(struct hdmi_audio_dma *aud_dma)
1305{
1306 u32 r;
1307
1308 DSSDBG("Enter hdmi_wp_audio_config_dma\n");
1309
1310 r = hdmi_read_reg(HDMI_WP_AUDIO_CFG2);
1311 r = FLD_MOD(r, aud_dma->transfer_size, 15, 8);
1312 r = FLD_MOD(r, aud_dma->block_size, 7, 0);
1313 hdmi_write_reg(HDMI_WP_AUDIO_CFG2, r);
1314
1315 r = hdmi_read_reg(HDMI_WP_AUDIO_CTRL);
1316 r = FLD_MOD(r, aud_dma->mode, 9, 9);
1317 r = FLD_MOD(r, aud_dma->fifo_threshold, 8, 0);
1318 hdmi_write_reg(HDMI_WP_AUDIO_CTRL, r);
1319}
1320
1321static void hdmi_core_audio_config(struct hdmi_core_audio_config *cfg)
1322{
1323 u32 r;
1324
1325 /* audio clock recovery parameters */
1326 r = hdmi_read_reg(HDMI_CORE_AV_ACR_CTRL);
1327 r = FLD_MOD(r, cfg->use_mclk, 2, 2);
1328 r = FLD_MOD(r, cfg->en_acr_pkt, 1, 1);
1329 r = FLD_MOD(r, cfg->cts_mode, 0, 0);
1330 hdmi_write_reg(HDMI_CORE_AV_ACR_CTRL, r);
1331
1332 REG_FLD_MOD(HDMI_CORE_AV_N_SVAL1, cfg->n, 7, 0);
1333 REG_FLD_MOD(HDMI_CORE_AV_N_SVAL2, cfg->n >> 8, 7, 0);
1334 REG_FLD_MOD(HDMI_CORE_AV_N_SVAL3, cfg->n >> 16, 7, 0);
1335
1336 if (cfg->cts_mode == HDMI_AUDIO_CTS_MODE_SW) {
1337 REG_FLD_MOD(HDMI_CORE_AV_CTS_SVAL1, cfg->cts, 7, 0);
1338 REG_FLD_MOD(HDMI_CORE_AV_CTS_SVAL2, cfg->cts >> 8, 7, 0);
1339 REG_FLD_MOD(HDMI_CORE_AV_CTS_SVAL3, cfg->cts >> 16, 7, 0);
1340 } else {
1341 /*
1342 * HDMI IP uses this configuration to divide the MCLK to
1343 * update CTS value.
1344 */
1345 REG_FLD_MOD(HDMI_CORE_AV_FREQ_SVAL, cfg->mclk_mode, 2, 0);
1346
1347 /* Configure clock for audio packets */
1348 REG_FLD_MOD(HDMI_CORE_AV_AUD_PAR_BUSCLK_1,
1349 cfg->aud_par_busclk, 7, 0);
1350 REG_FLD_MOD(HDMI_CORE_AV_AUD_PAR_BUSCLK_2,
1351 (cfg->aud_par_busclk >> 8), 7, 0);
1352 REG_FLD_MOD(HDMI_CORE_AV_AUD_PAR_BUSCLK_3,
1353 (cfg->aud_par_busclk >> 16), 7, 0);
1354 }
1355
1356 /* Override of SPDIF sample frequency with value in I2S_CHST4 */
1357 REG_FLD_MOD(HDMI_CORE_AV_SPDIF_CTRL, cfg->fs_override, 1, 1);
1358
1359 /* I2S parameters */
1360 REG_FLD_MOD(HDMI_CORE_AV_I2S_CHST4, cfg->freq_sample, 3, 0);
1361
1362 r = hdmi_read_reg(HDMI_CORE_AV_I2S_IN_CTRL);
1363 r = FLD_MOD(r, cfg->i2s_cfg.en_high_bitrate_aud, 7, 7);
1364 r = FLD_MOD(r, cfg->i2s_cfg.sck_edge_mode, 6, 6);
1365 r = FLD_MOD(r, cfg->i2s_cfg.cbit_order, 5, 5);
1366 r = FLD_MOD(r, cfg->i2s_cfg.vbit, 4, 4);
1367 r = FLD_MOD(r, cfg->i2s_cfg.ws_polarity, 3, 3);
1368 r = FLD_MOD(r, cfg->i2s_cfg.justification, 2, 2);
1369 r = FLD_MOD(r, cfg->i2s_cfg.direction, 1, 1);
1370 r = FLD_MOD(r, cfg->i2s_cfg.shift, 0, 0);
1371 hdmi_write_reg(HDMI_CORE_AV_I2S_IN_CTRL, r);
1372
1373 r = hdmi_read_reg(HDMI_CORE_AV_I2S_CHST5);
1374 r = FLD_MOD(r, cfg->freq_sample, 7, 4);
1375 r = FLD_MOD(r, cfg->i2s_cfg.word_length, 3, 1);
1376 r = FLD_MOD(r, cfg->i2s_cfg.word_max_length, 0, 0);
1377 hdmi_write_reg(HDMI_CORE_AV_I2S_CHST5, r);
1378
1379 REG_FLD_MOD(HDMI_CORE_AV_I2S_IN_LEN, cfg->i2s_cfg.in_length_bits, 3, 0);
1380
1381 /* Audio channels and mode parameters */
1382 REG_FLD_MOD(HDMI_CORE_AV_HDMI_CTRL, cfg->layout, 2, 1);
1383 r = hdmi_read_reg(HDMI_CORE_AV_AUD_MODE);
1384 r = FLD_MOD(r, cfg->i2s_cfg.active_sds, 7, 4);
1385 r = FLD_MOD(r, cfg->en_dsd_audio, 3, 3);
1386 r = FLD_MOD(r, cfg->en_parallel_aud_input, 2, 2);
1387 r = FLD_MOD(r, cfg->en_spdif, 1, 1);
1388 hdmi_write_reg(HDMI_CORE_AV_AUD_MODE, r);
1389}
1390
1391static void hdmi_core_audio_infoframe_config(
1392 struct hdmi_core_infoframe_audio *info_aud)
1393{
1394 u8 val;
1395 u8 sum = 0, checksum = 0;
1396
1397 /*
1398 * Set audio info frame type, version and length as
1399 * described in HDMI 1.4a Section 8.2.2 specification.
1400 * Checksum calculation is defined in Section 5.3.5.
1401 */
1402 hdmi_write_reg(HDMI_CORE_AV_AUDIO_TYPE, 0x84);
1403 hdmi_write_reg(HDMI_CORE_AV_AUDIO_VERS, 0x01);
1404 hdmi_write_reg(HDMI_CORE_AV_AUDIO_LEN, 0x0a);
1405 sum += 0x84 + 0x001 + 0x00a;
1406
1407 val = (info_aud->db1_coding_type << 4)
1408 | (info_aud->db1_channel_count - 1);
1409 hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(0), val);
1410 sum += val;
1411
1412 val = (info_aud->db2_sample_freq << 2) | info_aud->db2_sample_size;
1413 hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(1), val);
1414 sum += val;
1415
1416 hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(2), 0x00);
1417
1418 val = info_aud->db4_channel_alloc;
1419 hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(3), val);
1420 sum += val;
1421
1422 val = (info_aud->db5_downmix_inh << 7) | (info_aud->db5_lsv << 3);
1423 hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(4), val);
1424 sum += val;
1425
1426 hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(5), 0x00);
1427 hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(6), 0x00);
1428 hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(7), 0x00);
1429 hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(8), 0x00);
1430 hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(9), 0x00);
1431
1432 checksum = 0x100 - sum;
1433 hdmi_write_reg(HDMI_CORE_AV_AUDIO_CHSUM, checksum);
1434
1435 /*
1436 * TODO: Add MPEG and SPD enable and repeat cfg when EDID parsing
1437 * is available.
1438 */
1439}
1440
1441static int hdmi_config_audio_acr(u32 sample_freq, u32 *n, u32 *cts)
1442{
1443 u32 r;
1444 u32 deep_color = 0;
1445 u32 pclk = hdmi.cfg.timings.timings.pixel_clock;
1446
1447 if (n == NULL || cts == NULL)
1448 return -EINVAL;
1449 /*
1450 * Obtain current deep color configuration. This needed
1451 * to calculate the TMDS clock based on the pixel clock.
1452 */
1453 r = REG_GET(HDMI_WP_VIDEO_CFG, 1, 0);
1454 switch (r) {
1455 case 1: /* No deep color selected */
1456 deep_color = 100;
1457 break;
1458 case 2: /* 10-bit deep color selected */
1459 deep_color = 125;
1460 break;
1461 case 3: /* 12-bit deep color selected */
1462 deep_color = 150;
1463 break;
1464 default:
1465 return -EINVAL;
1466 }
1467
1468 switch (sample_freq) {
1469 case 32000:
1470 if ((deep_color == 125) && ((pclk == 54054)
1471 || (pclk == 74250)))
1472 *n = 8192;
1473 else
1474 *n = 4096;
1475 break;
1476 case 44100:
1477 *n = 6272;
1478 break;
1479 case 48000:
1480 if ((deep_color == 125) && ((pclk == 54054)
1481 || (pclk == 74250)))
1482 *n = 8192;
1483 else
1484 *n = 6144;
1485 break;
1486 default:
1487 *n = 0;
1488 return -EINVAL;
1489 }
1490
1491 /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
1492 *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
1493
1494 return 0;
1495}
1496
1497static int hdmi_audio_hw_params(struct snd_pcm_substream *substream,
1498 struct snd_pcm_hw_params *params,
1499 struct snd_soc_dai *dai)
1500{
1501 struct hdmi_audio_format audio_format;
1502 struct hdmi_audio_dma audio_dma;
1503 struct hdmi_core_audio_config core_cfg;
1504 struct hdmi_core_infoframe_audio aud_if_cfg;
1505 int err, n, cts;
1506 enum hdmi_core_audio_sample_freq sample_freq;
1507
1508 switch (params_format(params)) {
1509 case SNDRV_PCM_FORMAT_S16_LE:
1510 core_cfg.i2s_cfg.word_max_length =
1511 HDMI_AUDIO_I2S_MAX_WORD_20BITS;
1512 core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_16_BITS;
1513 core_cfg.i2s_cfg.in_length_bits =
1514 HDMI_AUDIO_I2S_INPUT_LENGTH_16;
1515 core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
1516 audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
1517 audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS;
1518 audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT;
1519 audio_dma.transfer_size = 0x10;
1520 break;
1521 case SNDRV_PCM_FORMAT_S24_LE:
1522 core_cfg.i2s_cfg.word_max_length =
1523 HDMI_AUDIO_I2S_MAX_WORD_24BITS;
1524 core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_24_BITS;
1525 core_cfg.i2s_cfg.in_length_bits =
1526 HDMI_AUDIO_I2S_INPUT_LENGTH_24;
1527 audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE;
1528 audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS;
1529 audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
1530 core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
1531 audio_dma.transfer_size = 0x20;
1532 break;
1533 default:
1534 return -EINVAL;
1535 }
1536
1537 switch (params_rate(params)) {
1538 case 32000:
1539 sample_freq = HDMI_AUDIO_FS_32000;
1540 break;
1541 case 44100:
1542 sample_freq = HDMI_AUDIO_FS_44100;
1543 break;
1544 case 48000:
1545 sample_freq = HDMI_AUDIO_FS_48000;
1546 break;
1547 default:
1548 return -EINVAL;
1549 }
1550
1551 err = hdmi_config_audio_acr(params_rate(params), &n, &cts);
1552 if (err < 0)
1553 return err;
1554
1555 /* Audio wrapper config */
1556 audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL;
1557 audio_format.active_chnnls_msk = 0x03;
1558 audio_format.type = HDMI_AUDIO_TYPE_LPCM;
1559 audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
1560 /* Disable start/stop signals of IEC 60958 blocks */
1561 audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF;
1562
1563 audio_dma.block_size = 0xC0;
1564 audio_dma.mode = HDMI_AUDIO_TRANSF_DMA;
1565 audio_dma.fifo_threshold = 0x20; /* in number of samples */
1566
1567 hdmi_wp_audio_config_dma(&audio_dma);
1568 hdmi_wp_audio_config_format(&audio_format);
1569
1570 /*
1571 * I2S config
1572 */
1573 core_cfg.i2s_cfg.en_high_bitrate_aud = false;
1574 /* Only used with high bitrate audio */
1575 core_cfg.i2s_cfg.cbit_order = false;
1576 /* Serial data and word select should change on sck rising edge */
1577 core_cfg.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
1578 core_cfg.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
1579 /* Set I2S word select polarity */
1580 core_cfg.i2s_cfg.ws_polarity = HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT;
1581 core_cfg.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
1582 /* Set serial data to word select shift. See Phillips spec. */
1583 core_cfg.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
1584 /* Enable one of the four available serial data channels */
1585 core_cfg.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
1586
1587 /* Core audio config */
1588 core_cfg.freq_sample = sample_freq;
1589 core_cfg.n = n;
1590 core_cfg.cts = cts;
1591 if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
1592 core_cfg.aud_par_busclk = 0;
1593 core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
1594 core_cfg.use_mclk = false;
1595 } else {
1596 core_cfg.aud_par_busclk = (((128 * 31) - 1) << 8);
1597 core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
1598 core_cfg.use_mclk = true;
1599 core_cfg.mclk_mode = HDMI_AUDIO_MCLK_128FS;
1600 }
1601 core_cfg.layout = HDMI_AUDIO_LAYOUT_2CH;
1602 core_cfg.en_spdif = false;
1603 /* Use sample frequency from channel status word */
1604 core_cfg.fs_override = true;
1605 /* Enable ACR packets */
1606 core_cfg.en_acr_pkt = true;
1607 /* Disable direct streaming digital audio */
1608 core_cfg.en_dsd_audio = false;
1609 /* Use parallel audio interface */
1610 core_cfg.en_parallel_aud_input = true;
1611
1612 hdmi_core_audio_config(&core_cfg);
1613
1614 /*
1615 * Configure packet
1616 * info frame audio see doc CEA861-D page 74
1617 */
1618 aud_if_cfg.db1_coding_type = HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM;
1619 aud_if_cfg.db1_channel_count = 2;
1620 aud_if_cfg.db2_sample_freq = HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM;
1621 aud_if_cfg.db2_sample_size = HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM;
1622 aud_if_cfg.db4_channel_alloc = 0x00;
1623 aud_if_cfg.db5_downmix_inh = false;
1624 aud_if_cfg.db5_lsv = 0;
1625
1626 hdmi_core_audio_infoframe_config(&aud_if_cfg);
1627 return 0;
1628}
1629
1630static int hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd,
1631 struct snd_soc_dai *dai)
1632{
1633 int err = 0;
1634 switch (cmd) {
1635 case SNDRV_PCM_TRIGGER_START:
1636 case SNDRV_PCM_TRIGGER_RESUME:
1637 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
1638 REG_FLD_MOD(HDMI_CORE_AV_AUD_MODE, 1, 0, 0);
1639 REG_FLD_MOD(HDMI_WP_AUDIO_CTRL, 1, 31, 31);
1640 REG_FLD_MOD(HDMI_WP_AUDIO_CTRL, 1, 30, 30);
1641 break;
1642
1643 case SNDRV_PCM_TRIGGER_STOP:
1644 case SNDRV_PCM_TRIGGER_SUSPEND:
1645 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
1646 REG_FLD_MOD(HDMI_CORE_AV_AUD_MODE, 0, 0, 0);
1647 REG_FLD_MOD(HDMI_WP_AUDIO_CTRL, 0, 30, 30);
1648 REG_FLD_MOD(HDMI_WP_AUDIO_CTRL, 0, 31, 31);
1649 break;
1650 default:
1651 err = -EINVAL;
1652 }
1653 return err;
1654}
1655
1656static int hdmi_audio_startup(struct snd_pcm_substream *substream,
1657 struct snd_soc_dai *dai)
1658{
1659 if (!hdmi.mode) {
1660 pr_err("Current video settings do not support audio.\n");
1661 return -EIO;
1662 }
1663 return 0;
1664}
1665
1666static struct snd_soc_codec_driver hdmi_audio_codec_drv = {
1667};
1668
1669static struct snd_soc_dai_ops hdmi_audio_codec_ops = {
1670 .hw_params = hdmi_audio_hw_params,
1671 .trigger = hdmi_audio_trigger,
1672 .startup = hdmi_audio_startup,
1673};
1674
1675static struct snd_soc_dai_driver hdmi_codec_dai_drv = {
1676 .name = "hdmi-audio-codec",
1677 .playback = {
1678 .channels_min = 2,
1679 .channels_max = 2,
1680 .rates = SNDRV_PCM_RATE_32000 |
1681 SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
1682 .formats = SNDRV_PCM_FMTBIT_S16_LE |
1683 SNDRV_PCM_FMTBIT_S24_LE,
1684 },
1685 .ops = &hdmi_audio_codec_ops,
1686};
1687#endif
1688
1278/* HDMI HW IP initialisation */ 1689/* HDMI HW IP initialisation */
1279static int omapdss_hdmihw_probe(struct platform_device *pdev) 1690static int omapdss_hdmihw_probe(struct platform_device *pdev)
1280{ 1691{
1281 struct resource *hdmi_mem; 1692 struct resource *hdmi_mem;
1693#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
1694 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
1695 int ret;
1696#endif
1282 1697
1283 hdmi.pdata = pdev->dev.platform_data; 1698 hdmi.pdata = pdev->dev.platform_data;
1284 hdmi.pdev = pdev; 1699 hdmi.pdev = pdev;
@@ -1300,6 +1715,17 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
1300 1715
1301 hdmi_panel_init(); 1716 hdmi_panel_init();
1302 1717
1718#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
1719 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
1720
1721 /* Register ASoC codec DAI */
1722 ret = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv,
1723 &hdmi_codec_dai_drv, 1);
1724 if (ret) {
1725 DSSERR("can't register ASoC HDMI audio codec\n");
1726 return ret;
1727 }
1728#endif
1303 return 0; 1729 return 0;
1304} 1730}
1305 1731
@@ -1307,6 +1733,11 @@ static int omapdss_hdmihw_remove(struct platform_device *pdev)
1307{ 1733{
1308 hdmi_panel_exit(); 1734 hdmi_panel_exit();
1309 1735
1736#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
1737 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
1738 snd_soc_unregister_codec(&pdev->dev);
1739#endif
1740
1310 iounmap(hdmi.base_wp); 1741 iounmap(hdmi.base_wp);
1311 1742
1312 return 0; 1743 return 0;
diff --git a/drivers/video/omap2/dss/hdmi.h b/drivers/video/omap2/dss/hdmi.h
index 9887ab96da3c..c885f9cb0659 100644
--- a/drivers/video/omap2/dss/hdmi.h
+++ b/drivers/video/omap2/dss/hdmi.h
@@ -22,7 +22,7 @@
22#define _OMAP4_DSS_HDMI_H_ 22#define _OMAP4_DSS_HDMI_H_
23 23
24#include <linux/string.h> 24#include <linux/string.h>
25#include <plat/display.h> 25#include <video/omapdss.h>
26 26
27#define HDMI_WP 0x0 27#define HDMI_WP 0x0
28#define HDMI_CORE_SYS 0x400 28#define HDMI_CORE_SYS 0x400
@@ -48,6 +48,10 @@ struct hdmi_reg { u16 idx; };
48#define HDMI_WP_VIDEO_TIMING_H HDMI_WP_REG(0x68) 48#define HDMI_WP_VIDEO_TIMING_H HDMI_WP_REG(0x68)
49#define HDMI_WP_VIDEO_TIMING_V HDMI_WP_REG(0x6C) 49#define HDMI_WP_VIDEO_TIMING_V HDMI_WP_REG(0x6C)
50#define HDMI_WP_WP_CLK HDMI_WP_REG(0x70) 50#define HDMI_WP_WP_CLK HDMI_WP_REG(0x70)
51#define HDMI_WP_AUDIO_CFG HDMI_WP_REG(0x80)
52#define HDMI_WP_AUDIO_CFG2 HDMI_WP_REG(0x84)
53#define HDMI_WP_AUDIO_CTRL HDMI_WP_REG(0x88)
54#define HDMI_WP_AUDIO_DATA HDMI_WP_REG(0x8C)
51 55
52/* HDMI IP Core System */ 56/* HDMI IP Core System */
53#define HDMI_CORE_SYS_REG(idx) HDMI_REG(HDMI_CORE_SYS + idx) 57#define HDMI_CORE_SYS_REG(idx) HDMI_REG(HDMI_CORE_SYS + idx)
@@ -105,6 +109,8 @@ struct hdmi_reg { u16 idx; };
105#define HDMI_CORE_AV_AVI_DBYTE_NELEMS HDMI_CORE_AV_REG(15) 109#define HDMI_CORE_AV_AVI_DBYTE_NELEMS HDMI_CORE_AV_REG(15)
106#define HDMI_CORE_AV_SPD_DBYTE HDMI_CORE_AV_REG(0x190) 110#define HDMI_CORE_AV_SPD_DBYTE HDMI_CORE_AV_REG(0x190)
107#define HDMI_CORE_AV_SPD_DBYTE_NELEMS HDMI_CORE_AV_REG(27) 111#define HDMI_CORE_AV_SPD_DBYTE_NELEMS HDMI_CORE_AV_REG(27)
112#define HDMI_CORE_AV_AUD_DBYTE(n) HDMI_CORE_AV_REG(n * 4 + 0x210)
113#define HDMI_CORE_AV_AUD_DBYTE_NELEMS HDMI_CORE_AV_REG(10)
108#define HDMI_CORE_AV_MPEG_DBYTE HDMI_CORE_AV_REG(0x290) 114#define HDMI_CORE_AV_MPEG_DBYTE HDMI_CORE_AV_REG(0x290)
109#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS HDMI_CORE_AV_REG(27) 115#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS HDMI_CORE_AV_REG(27)
110#define HDMI_CORE_AV_GEN_DBYTE HDMI_CORE_AV_REG(0x300) 116#define HDMI_CORE_AV_GEN_DBYTE HDMI_CORE_AV_REG(0x300)
@@ -153,6 +159,10 @@ struct hdmi_reg { u16 idx; };
153#define HDMI_CORE_AV_SPD_VERS HDMI_CORE_AV_REG(0x184) 159#define HDMI_CORE_AV_SPD_VERS HDMI_CORE_AV_REG(0x184)
154#define HDMI_CORE_AV_SPD_LEN HDMI_CORE_AV_REG(0x188) 160#define HDMI_CORE_AV_SPD_LEN HDMI_CORE_AV_REG(0x188)
155#define HDMI_CORE_AV_SPD_CHSUM HDMI_CORE_AV_REG(0x18C) 161#define HDMI_CORE_AV_SPD_CHSUM HDMI_CORE_AV_REG(0x18C)
162#define HDMI_CORE_AV_AUDIO_TYPE HDMI_CORE_AV_REG(0x200)
163#define HDMI_CORE_AV_AUDIO_VERS HDMI_CORE_AV_REG(0x204)
164#define HDMI_CORE_AV_AUDIO_LEN HDMI_CORE_AV_REG(0x208)
165#define HDMI_CORE_AV_AUDIO_CHSUM HDMI_CORE_AV_REG(0x20C)
156#define HDMI_CORE_AV_MPEG_TYPE HDMI_CORE_AV_REG(0x280) 166#define HDMI_CORE_AV_MPEG_TYPE HDMI_CORE_AV_REG(0x280)
157#define HDMI_CORE_AV_MPEG_VERS HDMI_CORE_AV_REG(0x284) 167#define HDMI_CORE_AV_MPEG_VERS HDMI_CORE_AV_REG(0x284)
158#define HDMI_CORE_AV_MPEG_LEN HDMI_CORE_AV_REG(0x288) 168#define HDMI_CORE_AV_MPEG_LEN HDMI_CORE_AV_REG(0x288)
@@ -272,7 +282,7 @@ enum hdmi_core_packet_ctrl {
272 HDMI_PACKETREPEATOFF = 0 282 HDMI_PACKETREPEATOFF = 0
273}; 283};
274 284
275/* INFOFRAME_AVI_ definitions */ 285/* INFOFRAME_AVI_ and INFOFRAME_AUDIO_ definitions */
276enum hdmi_core_infoframe { 286enum hdmi_core_infoframe {
277 HDMI_INFOFRAME_AVI_DB1Y_RGB = 0, 287 HDMI_INFOFRAME_AVI_DB1Y_RGB = 0,
278 HDMI_INFOFRAME_AVI_DB1Y_YUV422 = 1, 288 HDMI_INFOFRAME_AVI_DB1Y_YUV422 = 1,
@@ -317,7 +327,36 @@ enum hdmi_core_infoframe {
317 HDMI_INFOFRAME_AVI_DB5PR_7 = 6, 327 HDMI_INFOFRAME_AVI_DB5PR_7 = 6,
318 HDMI_INFOFRAME_AVI_DB5PR_8 = 7, 328 HDMI_INFOFRAME_AVI_DB5PR_8 = 7,
319 HDMI_INFOFRAME_AVI_DB5PR_9 = 8, 329 HDMI_INFOFRAME_AVI_DB5PR_9 = 8,
320 HDMI_INFOFRAME_AVI_DB5PR_10 = 9 330 HDMI_INFOFRAME_AVI_DB5PR_10 = 9,
331 HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM = 0,
332 HDMI_INFOFRAME_AUDIO_DB1CT_IEC60958 = 1,
333 HDMI_INFOFRAME_AUDIO_DB1CT_AC3 = 2,
334 HDMI_INFOFRAME_AUDIO_DB1CT_MPEG1 = 3,
335 HDMI_INFOFRAME_AUDIO_DB1CT_MP3 = 4,
336 HDMI_INFOFRAME_AUDIO_DB1CT_MPEG2_MULTICH = 5,
337 HDMI_INFOFRAME_AUDIO_DB1CT_AAC = 6,
338 HDMI_INFOFRAME_AUDIO_DB1CT_DTS = 7,
339 HDMI_INFOFRAME_AUDIO_DB1CT_ATRAC = 8,
340 HDMI_INFOFRAME_AUDIO_DB1CT_ONEBIT = 9,
341 HDMI_INFOFRAME_AUDIO_DB1CT_DOLBY_DIGITAL_PLUS = 10,
342 HDMI_INFOFRAME_AUDIO_DB1CT_DTS_HD = 11,
343 HDMI_INFOFRAME_AUDIO_DB1CT_MAT = 12,
344 HDMI_INFOFRAME_AUDIO_DB1CT_DST = 13,
345 HDMI_INFOFRAME_AUDIO_DB1CT_WMA_PRO = 14,
346 HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM = 0,
347 HDMI_INFOFRAME_AUDIO_DB2SF_32000 = 1,
348 HDMI_INFOFRAME_AUDIO_DB2SF_44100 = 2,
349 HDMI_INFOFRAME_AUDIO_DB2SF_48000 = 3,
350 HDMI_INFOFRAME_AUDIO_DB2SF_88200 = 4,
351 HDMI_INFOFRAME_AUDIO_DB2SF_96000 = 5,
352 HDMI_INFOFRAME_AUDIO_DB2SF_176400 = 6,
353 HDMI_INFOFRAME_AUDIO_DB2SF_192000 = 7,
354 HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM = 0,
355 HDMI_INFOFRAME_AUDIO_DB2SS_16BIT = 1,
356 HDMI_INFOFRAME_AUDIO_DB2SS_20BIT = 2,
357 HDMI_INFOFRAME_AUDIO_DB2SS_24BIT = 3,
358 HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PERMITTED = 0,
359 HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PROHIBITED = 1
321}; 360};
322 361
323enum hdmi_packing_mode { 362enum hdmi_packing_mode {
@@ -327,6 +366,121 @@ enum hdmi_packing_mode {
327 HDMI_PACK_ALREADYPACKED = 7 366 HDMI_PACK_ALREADYPACKED = 7
328}; 367};
329 368
369enum hdmi_core_audio_sample_freq {
370 HDMI_AUDIO_FS_32000 = 0x3,
371 HDMI_AUDIO_FS_44100 = 0x0,
372 HDMI_AUDIO_FS_48000 = 0x2,
373 HDMI_AUDIO_FS_88200 = 0x8,
374 HDMI_AUDIO_FS_96000 = 0xA,
375 HDMI_AUDIO_FS_176400 = 0xC,
376 HDMI_AUDIO_FS_192000 = 0xE,
377 HDMI_AUDIO_FS_NOT_INDICATED = 0x1
378};
379
380enum hdmi_core_audio_layout {
381 HDMI_AUDIO_LAYOUT_2CH = 0,
382 HDMI_AUDIO_LAYOUT_8CH = 1
383};
384
385enum hdmi_core_cts_mode {
386 HDMI_AUDIO_CTS_MODE_HW = 0,
387 HDMI_AUDIO_CTS_MODE_SW = 1
388};
389
390enum hdmi_stereo_channels {
391 HDMI_AUDIO_STEREO_NOCHANNELS = 0,
392 HDMI_AUDIO_STEREO_ONECHANNEL = 1,
393 HDMI_AUDIO_STEREO_TWOCHANNELS = 2,
394 HDMI_AUDIO_STEREO_THREECHANNELS = 3,
395 HDMI_AUDIO_STEREO_FOURCHANNELS = 4
396};
397
398enum hdmi_audio_type {
399 HDMI_AUDIO_TYPE_LPCM = 0,
400 HDMI_AUDIO_TYPE_IEC = 1
401};
402
403enum hdmi_audio_justify {
404 HDMI_AUDIO_JUSTIFY_LEFT = 0,
405 HDMI_AUDIO_JUSTIFY_RIGHT = 1
406};
407
408enum hdmi_audio_sample_order {
409 HDMI_AUDIO_SAMPLE_RIGHT_FIRST = 0,
410 HDMI_AUDIO_SAMPLE_LEFT_FIRST = 1
411};
412
413enum hdmi_audio_samples_perword {
414 HDMI_AUDIO_ONEWORD_ONESAMPLE = 0,
415 HDMI_AUDIO_ONEWORD_TWOSAMPLES = 1
416};
417
418enum hdmi_audio_sample_size {
419 HDMI_AUDIO_SAMPLE_16BITS = 0,
420 HDMI_AUDIO_SAMPLE_24BITS = 1
421};
422
423enum hdmi_audio_transf_mode {
424 HDMI_AUDIO_TRANSF_DMA = 0,
425 HDMI_AUDIO_TRANSF_IRQ = 1
426};
427
428enum hdmi_audio_blk_strt_end_sig {
429 HDMI_AUDIO_BLOCK_SIG_STARTEND_ON = 0,
430 HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF = 1
431};
432
433enum hdmi_audio_i2s_config {
434 HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT = 0,
435 HDMI_AUDIO_I2S_WS_POLARIT_YLOW_IS_RIGHT = 1,
436 HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST = 0,
437 HDMI_AUDIO_I2S_LSB_SHIFTED_FIRST = 1,
438 HDMI_AUDIO_I2S_MAX_WORD_20BITS = 0,
439 HDMI_AUDIO_I2S_MAX_WORD_24BITS = 1,
440 HDMI_AUDIO_I2S_CHST_WORD_NOT_SPECIFIED = 0,
441 HDMI_AUDIO_I2S_CHST_WORD_16_BITS = 1,
442 HDMI_AUDIO_I2S_CHST_WORD_17_BITS = 6,
443 HDMI_AUDIO_I2S_CHST_WORD_18_BITS = 2,
444 HDMI_AUDIO_I2S_CHST_WORD_19_BITS = 4,
445 HDMI_AUDIO_I2S_CHST_WORD_20_BITS_20MAX = 5,
446 HDMI_AUDIO_I2S_CHST_WORD_20_BITS_24MAX = 1,
447 HDMI_AUDIO_I2S_CHST_WORD_21_BITS = 6,
448 HDMI_AUDIO_I2S_CHST_WORD_22_BITS = 2,
449 HDMI_AUDIO_I2S_CHST_WORD_23_BITS = 4,
450 HDMI_AUDIO_I2S_CHST_WORD_24_BITS = 5,
451 HDMI_AUDIO_I2S_SCK_EDGE_FALLING = 0,
452 HDMI_AUDIO_I2S_SCK_EDGE_RISING = 1,
453 HDMI_AUDIO_I2S_VBIT_FOR_PCM = 0,
454 HDMI_AUDIO_I2S_VBIT_FOR_COMPRESSED = 1,
455 HDMI_AUDIO_I2S_INPUT_LENGTH_NA = 0,
456 HDMI_AUDIO_I2S_INPUT_LENGTH_16 = 2,
457 HDMI_AUDIO_I2S_INPUT_LENGTH_17 = 12,
458 HDMI_AUDIO_I2S_INPUT_LENGTH_18 = 4,
459 HDMI_AUDIO_I2S_INPUT_LENGTH_19 = 8,
460 HDMI_AUDIO_I2S_INPUT_LENGTH_20 = 10,
461 HDMI_AUDIO_I2S_INPUT_LENGTH_21 = 13,
462 HDMI_AUDIO_I2S_INPUT_LENGTH_22 = 5,
463 HDMI_AUDIO_I2S_INPUT_LENGTH_23 = 9,
464 HDMI_AUDIO_I2S_INPUT_LENGTH_24 = 11,
465 HDMI_AUDIO_I2S_FIRST_BIT_SHIFT = 0,
466 HDMI_AUDIO_I2S_FIRST_BIT_NO_SHIFT = 1,
467 HDMI_AUDIO_I2S_SD0_EN = 1,
468 HDMI_AUDIO_I2S_SD1_EN = 1 << 1,
469 HDMI_AUDIO_I2S_SD2_EN = 1 << 2,
470 HDMI_AUDIO_I2S_SD3_EN = 1 << 3,
471};
472
473enum hdmi_audio_mclk_mode {
474 HDMI_AUDIO_MCLK_128FS = 0,
475 HDMI_AUDIO_MCLK_256FS = 1,
476 HDMI_AUDIO_MCLK_384FS = 2,
477 HDMI_AUDIO_MCLK_512FS = 3,
478 HDMI_AUDIO_MCLK_768FS = 4,
479 HDMI_AUDIO_MCLK_1024FS = 5,
480 HDMI_AUDIO_MCLK_1152FS = 6,
481 HDMI_AUDIO_MCLK_192FS = 7
482};
483
330struct hdmi_core_video_config { 484struct hdmi_core_video_config {
331 enum hdmi_core_inputbus_width ip_bus_width; 485 enum hdmi_core_inputbus_width ip_bus_width;
332 enum hdmi_core_dither_trunc op_dither_truc; 486 enum hdmi_core_dither_trunc op_dither_truc;
@@ -376,6 +530,19 @@ struct hdmi_core_infoframe_avi {
376 u16 db12_13_pixel_sofright; 530 u16 db12_13_pixel_sofright;
377 /* Pixel number start of right bar */ 531 /* Pixel number start of right bar */
378}; 532};
533/*
534 * Refer to section 8.2 in HDMI 1.3 specification for
535 * details about infoframe databytes
536 */
537struct hdmi_core_infoframe_audio {
538 u8 db1_coding_type;
539 u8 db1_channel_count;
540 u8 db2_sample_freq;
541 u8 db2_sample_size;
542 u8 db4_channel_alloc;
543 bool db5_downmix_inh;
544 u8 db5_lsv; /* Level shift values for downmix */
545};
379 546
380struct hdmi_core_packet_enable_repeat { 547struct hdmi_core_packet_enable_repeat {
381 u32 audio_pkt; 548 u32 audio_pkt;
@@ -412,4 +579,53 @@ struct hdmi_config {
412 struct hdmi_cm cm; 579 struct hdmi_cm cm;
413}; 580};
414 581
582struct hdmi_audio_format {
583 enum hdmi_stereo_channels stereo_channels;
584 u8 active_chnnls_msk;
585 enum hdmi_audio_type type;
586 enum hdmi_audio_justify justification;
587 enum hdmi_audio_sample_order sample_order;
588 enum hdmi_audio_samples_perword samples_per_word;
589 enum hdmi_audio_sample_size sample_size;
590 enum hdmi_audio_blk_strt_end_sig en_sig_blk_strt_end;
591};
592
593struct hdmi_audio_dma {
594 u8 transfer_size;
595 u8 block_size;
596 enum hdmi_audio_transf_mode mode;
597 u16 fifo_threshold;
598};
599
600struct hdmi_core_audio_i2s_config {
601 u8 word_max_length;
602 u8 word_length;
603 u8 in_length_bits;
604 u8 justification;
605 u8 en_high_bitrate_aud;
606 u8 sck_edge_mode;
607 u8 cbit_order;
608 u8 vbit;
609 u8 ws_polarity;
610 u8 direction;
611 u8 shift;
612 u8 active_sds;
613};
614
615struct hdmi_core_audio_config {
616 struct hdmi_core_audio_i2s_config i2s_cfg;
617 enum hdmi_core_audio_sample_freq freq_sample;
618 bool fs_override;
619 u32 n;
620 u32 cts;
621 u32 aud_par_busclk;
622 enum hdmi_core_audio_layout layout;
623 enum hdmi_core_cts_mode cts_mode;
624 bool use_mclk;
625 enum hdmi_audio_mclk_mode mclk_mode;
626 bool en_acr_pkt;
627 bool en_dsd_audio;
628 bool en_parallel_aud_input;
629 bool en_spdif;
630};
415#endif 631#endif
diff --git a/drivers/video/omap2/dss/hdmi_omap4_panel.c b/drivers/video/omap2/dss/hdmi_omap4_panel.c
index ffb5de94131f..7d4f2bd7c506 100644
--- a/drivers/video/omap2/dss/hdmi_omap4_panel.c
+++ b/drivers/video/omap2/dss/hdmi_omap4_panel.c
@@ -24,7 +24,7 @@
24#include <linux/io.h> 24#include <linux/io.h>
25#include <linux/mutex.h> 25#include <linux/mutex.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <plat/display.h> 27#include <video/omapdss.h>
28 28
29#include "dss.h" 29#include "dss.h"
30 30
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index bcd37ec86952..9aeea50e33ff 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -29,7 +29,7 @@
29#include <linux/spinlock.h> 29#include <linux/spinlock.h>
30#include <linux/jiffies.h> 30#include <linux/jiffies.h>
31 31
32#include <plat/display.h> 32#include <video/omapdss.h>
33#include <plat/cpu.h> 33#include <plat/cpu.h>
34 34
35#include "dss.h" 35#include "dss.h"
@@ -393,6 +393,7 @@ struct overlay_cache_data {
393 393
394 u32 paddr; 394 u32 paddr;
395 void __iomem *vaddr; 395 void __iomem *vaddr;
396 u32 p_uv_addr; /* relevant for NV12 format only */
396 u16 screen_width; 397 u16 screen_width;
397 u16 width; 398 u16 width;
398 u16 height; 399 u16 height;
@@ -775,10 +776,17 @@ static int configure_overlay(enum omap_plane plane)
775 } 776 }
776 777
777 switch (c->color_mode) { 778 switch (c->color_mode) {
779 case OMAP_DSS_COLOR_NV12:
780 bpp = 8;
781 break;
778 case OMAP_DSS_COLOR_RGB16: 782 case OMAP_DSS_COLOR_RGB16:
779 case OMAP_DSS_COLOR_ARGB16: 783 case OMAP_DSS_COLOR_ARGB16:
780 case OMAP_DSS_COLOR_YUV2: 784 case OMAP_DSS_COLOR_YUV2:
781 case OMAP_DSS_COLOR_UYVY: 785 case OMAP_DSS_COLOR_UYVY:
786 case OMAP_DSS_COLOR_RGBA16:
787 case OMAP_DSS_COLOR_RGBX16:
788 case OMAP_DSS_COLOR_ARGB16_1555:
789 case OMAP_DSS_COLOR_XRGB16_1555:
782 bpp = 16; 790 bpp = 16;
783 break; 791 break;
784 792
@@ -854,7 +862,8 @@ static int configure_overlay(enum omap_plane plane)
854 c->mirror, 862 c->mirror,
855 c->global_alpha, 863 c->global_alpha,
856 c->pre_mult_alpha, 864 c->pre_mult_alpha,
857 c->channel); 865 c->channel,
866 c->p_uv_addr);
858 867
859 if (r) { 868 if (r) {
860 /* this shouldn't happen */ 869 /* this shouldn't happen */
@@ -1269,6 +1278,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
1269 1278
1270 oc->paddr = ovl->info.paddr; 1279 oc->paddr = ovl->info.paddr;
1271 oc->vaddr = ovl->info.vaddr; 1280 oc->vaddr = ovl->info.vaddr;
1281 oc->p_uv_addr = ovl->info.p_uv_addr;
1272 oc->screen_width = ovl->info.screen_width; 1282 oc->screen_width = ovl->info.screen_width;
1273 oc->width = ovl->info.width; 1283 oc->width = ovl->info.width;
1274 oc->height = ovl->info.height; 1284 oc->height = ovl->info.height;
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index f1aca6d04011..0f08025b1f0e 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -31,7 +31,7 @@
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33 33
34#include <plat/display.h> 34#include <video/omapdss.h>
35#include <plat/cpu.h> 35#include <plat/cpu.h>
36 36
37#include "dss.h" 37#include "dss.h"
@@ -201,12 +201,16 @@ static ssize_t overlay_enabled_show(struct omap_overlay *ovl, char *buf)
201static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf, 201static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf,
202 size_t size) 202 size_t size)
203{ 203{
204 int r; 204 int r, enable;
205 struct omap_overlay_info info; 205 struct omap_overlay_info info;
206 206
207 ovl->get_overlay_info(ovl, &info); 207 ovl->get_overlay_info(ovl, &info);
208 208
209 info.enabled = simple_strtoul(buf, NULL, 10); 209 r = kstrtoint(buf, 0, &enable);
210 if (r)
211 return r;
212
213 info.enabled = !!enable;
210 214
211 r = ovl->set_overlay_info(ovl, &info); 215 r = ovl->set_overlay_info(ovl, &info);
212 if (r) 216 if (r)
@@ -231,8 +235,13 @@ static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
231 const char *buf, size_t size) 235 const char *buf, size_t size)
232{ 236{
233 int r; 237 int r;
238 u8 alpha;
234 struct omap_overlay_info info; 239 struct omap_overlay_info info;
235 240
241 r = kstrtou8(buf, 0, &alpha);
242 if (r)
243 return r;
244
236 ovl->get_overlay_info(ovl, &info); 245 ovl->get_overlay_info(ovl, &info);
237 246
238 /* Video1 plane does not support global alpha 247 /* Video1 plane does not support global alpha
@@ -242,7 +251,7 @@ static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
242 ovl->id == OMAP_DSS_VIDEO1) 251 ovl->id == OMAP_DSS_VIDEO1)
243 info.global_alpha = 255; 252 info.global_alpha = 255;
244 else 253 else
245 info.global_alpha = simple_strtoul(buf, NULL, 10); 254 info.global_alpha = alpha;
246 255
247 r = ovl->set_overlay_info(ovl, &info); 256 r = ovl->set_overlay_info(ovl, &info);
248 if (r) 257 if (r)
@@ -268,8 +277,13 @@ static ssize_t overlay_pre_mult_alpha_store(struct omap_overlay *ovl,
268 const char *buf, size_t size) 277 const char *buf, size_t size)
269{ 278{
270 int r; 279 int r;
280 u8 alpha;
271 struct omap_overlay_info info; 281 struct omap_overlay_info info;
272 282
283 r = kstrtou8(buf, 0, &alpha);
284 if (r)
285 return r;
286
273 ovl->get_overlay_info(ovl, &info); 287 ovl->get_overlay_info(ovl, &info);
274 288
275 /* only GFX and Video2 plane support pre alpha multiplied 289 /* only GFX and Video2 plane support pre alpha multiplied
@@ -279,7 +293,7 @@ static ssize_t overlay_pre_mult_alpha_store(struct omap_overlay *ovl,
279 ovl->id == OMAP_DSS_VIDEO1) 293 ovl->id == OMAP_DSS_VIDEO1)
280 info.pre_mult_alpha = 0; 294 info.pre_mult_alpha = 0;
281 else 295 else
282 info.pre_mult_alpha = simple_strtoul(buf, NULL, 10); 296 info.pre_mult_alpha = alpha;
283 297
284 r = ovl->set_overlay_info(ovl, &info); 298 r = ovl->set_overlay_info(ovl, &info);
285 if (r) 299 if (r)
@@ -491,13 +505,18 @@ static int omap_dss_set_manager(struct omap_overlay *ovl,
491 ovl->manager = mgr; 505 ovl->manager = mgr;
492 506
493 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); 507 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
494 /* XXX: on manual update display, in auto update mode, a bug happens 508 /* XXX: When there is an overlay on a DSI manual update display, and
495 * here. When an overlay is first enabled on LCD, then it's disabled, 509 * the overlay is first disabled, then moved to tv, and enabled, we
496 * and the manager is changed to TV, we sometimes get SYNC_LOST_DIGIT 510 * seem to get SYNC_LOST_DIGIT error.
497 * errors. Waiting before changing the channel_out fixes it. I'm 511 *
498 * guessing that the overlay is still somehow being used for the LCD, 512 * Waiting doesn't seem to help, but updating the manual update display
499 * but I don't understand how or why. */ 513 * after disabling the overlay seems to fix this. This hints that the
500 msleep(40); 514 * overlay is perhaps somehow tied to the LCD output until the output
515 * is updated.
516 *
517 * Userspace workaround for this is to update the LCD after disabling
518 * the overlay, but before moving the overlay to TV.
519 */
501 dispc_set_channel_out(ovl->id, mgr->id); 520 dispc_set_channel_out(ovl->id, mgr->id);
502 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 521 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
503 522
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index 5ea17f49c611..c06fbe0bc678 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -32,8 +32,9 @@
32#include <linux/ktime.h> 32#include <linux/ktime.h>
33#include <linux/hrtimer.h> 33#include <linux/hrtimer.h>
34#include <linux/seq_file.h> 34#include <linux/seq_file.h>
35#include <linux/semaphore.h>
35 36
36#include <plat/display.h> 37#include <video/omapdss.h>
37#include "dss.h" 38#include "dss.h"
38 39
39struct rfbi_reg { u16 idx; }; 40struct rfbi_reg { u16 idx; };
@@ -65,9 +66,6 @@ struct rfbi_reg { u16 idx; };
65#define REG_FLD_MOD(idx, val, start, end) \ 66#define REG_FLD_MOD(idx, val, start, end) \
66 rfbi_write_reg(idx, FLD_MOD(rfbi_read_reg(idx), val, start, end)) 67 rfbi_write_reg(idx, FLD_MOD(rfbi_read_reg(idx), val, start, end))
67 68
68/* To work around an RFBI transfer rate limitation */
69#define OMAP_RFBI_RATE_LIMIT 1
70
71enum omap_rfbi_cycleformat { 69enum omap_rfbi_cycleformat {
72 OMAP_DSS_RFBI_CYCLEFORMAT_1_1 = 0, 70 OMAP_DSS_RFBI_CYCLEFORMAT_1_1 = 0,
73 OMAP_DSS_RFBI_CYCLEFORMAT_2_1 = 1, 71 OMAP_DSS_RFBI_CYCLEFORMAT_2_1 = 1,
@@ -89,11 +87,6 @@ enum omap_rfbi_parallelmode {
89 OMAP_DSS_RFBI_PARALLELMODE_16 = 3, 87 OMAP_DSS_RFBI_PARALLELMODE_16 = 3,
90}; 88};
91 89
92enum update_cmd {
93 RFBI_CMD_UPDATE = 0,
94 RFBI_CMD_SYNC = 1,
95};
96
97static int rfbi_convert_timings(struct rfbi_timings *t); 90static int rfbi_convert_timings(struct rfbi_timings *t);
98static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div); 91static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div);
99 92
@@ -114,20 +107,9 @@ static struct {
114 107
115 struct omap_dss_device *dssdev[2]; 108 struct omap_dss_device *dssdev[2];
116 109
117 struct kfifo cmd_fifo; 110 struct semaphore bus_lock;
118 spinlock_t cmd_lock;
119 struct completion cmd_done;
120 atomic_t cmd_fifo_full;
121 atomic_t cmd_pending;
122} rfbi; 111} rfbi;
123 112
124struct update_region {
125 u16 x;
126 u16 y;
127 u16 w;
128 u16 h;
129};
130
131static inline void rfbi_write_reg(const struct rfbi_reg idx, u32 val) 113static inline void rfbi_write_reg(const struct rfbi_reg idx, u32 val)
132{ 114{
133 __raw_writel(val, rfbi.base + idx.idx); 115 __raw_writel(val, rfbi.base + idx.idx);
@@ -146,9 +128,20 @@ static void rfbi_enable_clocks(bool enable)
146 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); 128 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
147} 129}
148 130
131void rfbi_bus_lock(void)
132{
133 down(&rfbi.bus_lock);
134}
135EXPORT_SYMBOL(rfbi_bus_lock);
136
137void rfbi_bus_unlock(void)
138{
139 up(&rfbi.bus_lock);
140}
141EXPORT_SYMBOL(rfbi_bus_unlock);
142
149void omap_rfbi_write_command(const void *buf, u32 len) 143void omap_rfbi_write_command(const void *buf, u32 len)
150{ 144{
151 rfbi_enable_clocks(1);
152 switch (rfbi.parallelmode) { 145 switch (rfbi.parallelmode) {
153 case OMAP_DSS_RFBI_PARALLELMODE_8: 146 case OMAP_DSS_RFBI_PARALLELMODE_8:
154 { 147 {
@@ -172,13 +165,11 @@ void omap_rfbi_write_command(const void *buf, u32 len)
172 default: 165 default:
173 BUG(); 166 BUG();
174 } 167 }
175 rfbi_enable_clocks(0);
176} 168}
177EXPORT_SYMBOL(omap_rfbi_write_command); 169EXPORT_SYMBOL(omap_rfbi_write_command);
178 170
179void omap_rfbi_read_data(void *buf, u32 len) 171void omap_rfbi_read_data(void *buf, u32 len)
180{ 172{
181 rfbi_enable_clocks(1);
182 switch (rfbi.parallelmode) { 173 switch (rfbi.parallelmode) {
183 case OMAP_DSS_RFBI_PARALLELMODE_8: 174 case OMAP_DSS_RFBI_PARALLELMODE_8:
184 { 175 {
@@ -206,13 +197,11 @@ void omap_rfbi_read_data(void *buf, u32 len)
206 default: 197 default:
207 BUG(); 198 BUG();
208 } 199 }
209 rfbi_enable_clocks(0);
210} 200}
211EXPORT_SYMBOL(omap_rfbi_read_data); 201EXPORT_SYMBOL(omap_rfbi_read_data);
212 202
213void omap_rfbi_write_data(const void *buf, u32 len) 203void omap_rfbi_write_data(const void *buf, u32 len)
214{ 204{
215 rfbi_enable_clocks(1);
216 switch (rfbi.parallelmode) { 205 switch (rfbi.parallelmode) {
217 case OMAP_DSS_RFBI_PARALLELMODE_8: 206 case OMAP_DSS_RFBI_PARALLELMODE_8:
218 { 207 {
@@ -237,7 +226,6 @@ void omap_rfbi_write_data(const void *buf, u32 len)
237 BUG(); 226 BUG();
238 227
239 } 228 }
240 rfbi_enable_clocks(0);
241} 229}
242EXPORT_SYMBOL(omap_rfbi_write_data); 230EXPORT_SYMBOL(omap_rfbi_write_data);
243 231
@@ -249,8 +237,6 @@ void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width,
249 int horiz_offset = scr_width - w; 237 int horiz_offset = scr_width - w;
250 int i; 238 int i;
251 239
252 rfbi_enable_clocks(1);
253
254 if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 && 240 if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 &&
255 rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) { 241 rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) {
256 const u16 __iomem *pd = buf; 242 const u16 __iomem *pd = buf;
@@ -295,12 +281,10 @@ void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width,
295 } else { 281 } else {
296 BUG(); 282 BUG();
297 } 283 }
298
299 rfbi_enable_clocks(0);
300} 284}
301EXPORT_SYMBOL(omap_rfbi_write_pixels); 285EXPORT_SYMBOL(omap_rfbi_write_pixels);
302 286
303void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, 287static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
304 u16 height, void (*callback)(void *data), void *data) 288 u16 height, void (*callback)(void *data), void *data)
305{ 289{
306 u32 l; 290 u32 l;
@@ -317,8 +301,6 @@ void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
317 rfbi.framedone_callback = callback; 301 rfbi.framedone_callback = callback;
318 rfbi.framedone_callback_data = data; 302 rfbi.framedone_callback_data = data;
319 303
320 rfbi_enable_clocks(1);
321
322 rfbi_write_reg(RFBI_PIXEL_CNT, width * height); 304 rfbi_write_reg(RFBI_PIXEL_CNT, width * height);
323 305
324 l = rfbi_read_reg(RFBI_CONTROL); 306 l = rfbi_read_reg(RFBI_CONTROL);
@@ -337,15 +319,11 @@ static void framedone_callback(void *data, u32 mask)
337 319
338 REG_FLD_MOD(RFBI_CONTROL, 0, 0, 0); 320 REG_FLD_MOD(RFBI_CONTROL, 0, 0, 0);
339 321
340 rfbi_enable_clocks(0);
341
342 callback = rfbi.framedone_callback; 322 callback = rfbi.framedone_callback;
343 rfbi.framedone_callback = NULL; 323 rfbi.framedone_callback = NULL;
344 324
345 if (callback != NULL) 325 if (callback != NULL)
346 callback(rfbi.framedone_callback_data); 326 callback(rfbi.framedone_callback_data);
347
348 atomic_set(&rfbi.cmd_pending, 0);
349} 327}
350 328
351#if 1 /* VERBOSE */ 329#if 1 /* VERBOSE */
@@ -435,7 +413,7 @@ static int calc_extif_timings(struct rfbi_timings *t)
435} 413}
436 414
437 415
438void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t) 416static void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t)
439{ 417{
440 int r; 418 int r;
441 419
@@ -447,7 +425,6 @@ void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t)
447 425
448 BUG_ON(!t->converted); 426 BUG_ON(!t->converted);
449 427
450 rfbi_enable_clocks(1);
451 rfbi_write_reg(RFBI_ONOFF_TIME(rfbi_module), t->tim[0]); 428 rfbi_write_reg(RFBI_ONOFF_TIME(rfbi_module), t->tim[0]);
452 rfbi_write_reg(RFBI_CYCLE_TIME(rfbi_module), t->tim[1]); 429 rfbi_write_reg(RFBI_CYCLE_TIME(rfbi_module), t->tim[1]);
453 430
@@ -456,7 +433,6 @@ void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t)
456 (t->tim[2] ? 1 : 0), 4, 4); 433 (t->tim[2] ? 1 : 0), 4, 4);
457 434
458 rfbi_print_timings(); 435 rfbi_print_timings();
459 rfbi_enable_clocks(0);
460} 436}
461 437
462static int ps_to_rfbi_ticks(int time, int div) 438static int ps_to_rfbi_ticks(int time, int div)
@@ -472,59 +448,6 @@ static int ps_to_rfbi_ticks(int time, int div)
472 return ret; 448 return ret;
473} 449}
474 450
475#ifdef OMAP_RFBI_RATE_LIMIT
476unsigned long rfbi_get_max_tx_rate(void)
477{
478 unsigned long l4_rate, dss1_rate;
479 int min_l4_ticks = 0;
480 int i;
481
482 /* According to TI this can't be calculated so make the
483 * adjustments for a couple of known frequencies and warn for
484 * others.
485 */
486 static const struct {
487 unsigned long l4_clk; /* HZ */
488 unsigned long dss1_clk; /* HZ */
489 unsigned long min_l4_ticks;
490 } ftab[] = {
491 { 55, 132, 7, }, /* 7.86 MPix/s */
492 { 110, 110, 12, }, /* 9.16 MPix/s */
493 { 110, 132, 10, }, /* 11 Mpix/s */
494 { 120, 120, 10, }, /* 12 Mpix/s */
495 { 133, 133, 10, }, /* 13.3 Mpix/s */
496 };
497
498 l4_rate = rfbi.l4_khz / 1000;
499 dss1_rate = dss_clk_get_rate(DSS_CLK_FCK) / 1000000;
500
501 for (i = 0; i < ARRAY_SIZE(ftab); i++) {
502 /* Use a window instead of an exact match, to account
503 * for different DPLL multiplier / divider pairs.
504 */
505 if (abs(ftab[i].l4_clk - l4_rate) < 3 &&
506 abs(ftab[i].dss1_clk - dss1_rate) < 3) {
507 min_l4_ticks = ftab[i].min_l4_ticks;
508 break;
509 }
510 }
511 if (i == ARRAY_SIZE(ftab)) {
512 /* Can't be sure, return anyway the maximum not
513 * rate-limited. This might cause a problem only for the
514 * tearing synchronisation.
515 */
516 DSSERR("can't determine maximum RFBI transfer rate\n");
517 return rfbi.l4_khz * 1000;
518 }
519 return rfbi.l4_khz * 1000 / min_l4_ticks;
520}
521#else
522int rfbi_get_max_tx_rate(void)
523{
524 return rfbi.l4_khz * 1000;
525}
526#endif
527
528static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div) 451static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div)
529{ 452{
530 *clk_period = 1000000000 / rfbi.l4_khz; 453 *clk_period = 1000000000 / rfbi.l4_khz;
@@ -644,7 +567,6 @@ int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode,
644 DSSDBG("setup_te: mode %d hs %d vs %d hs_inv %d vs_inv %d\n", 567 DSSDBG("setup_te: mode %d hs %d vs %d hs_inv %d vs_inv %d\n",
645 mode, hs, vs, hs_pol_inv, vs_pol_inv); 568 mode, hs, vs, hs_pol_inv, vs_pol_inv);
646 569
647 rfbi_enable_clocks(1);
648 rfbi_write_reg(RFBI_HSYNC_WIDTH, hs); 570 rfbi_write_reg(RFBI_HSYNC_WIDTH, hs);
649 rfbi_write_reg(RFBI_VSYNC_WIDTH, vs); 571 rfbi_write_reg(RFBI_VSYNC_WIDTH, vs);
650 572
@@ -657,7 +579,6 @@ int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode,
657 l &= ~(1 << 20); 579 l &= ~(1 << 20);
658 else 580 else
659 l |= 1 << 20; 581 l |= 1 << 20;
660 rfbi_enable_clocks(0);
661 582
662 return 0; 583 return 0;
663} 584}
@@ -672,7 +593,6 @@ int omap_rfbi_enable_te(bool enable, unsigned line)
672 if (line > (1 << 11) - 1) 593 if (line > (1 << 11) - 1)
673 return -EINVAL; 594 return -EINVAL;
674 595
675 rfbi_enable_clocks(1);
676 l = rfbi_read_reg(RFBI_CONFIG(0)); 596 l = rfbi_read_reg(RFBI_CONFIG(0));
677 l &= ~(0x3 << 2); 597 l &= ~(0x3 << 2);
678 if (enable) { 598 if (enable) {
@@ -682,50 +602,12 @@ int omap_rfbi_enable_te(bool enable, unsigned line)
682 rfbi.te_enabled = 0; 602 rfbi.te_enabled = 0;
683 rfbi_write_reg(RFBI_CONFIG(0), l); 603 rfbi_write_reg(RFBI_CONFIG(0), l);
684 rfbi_write_reg(RFBI_LINE_NUMBER, line); 604 rfbi_write_reg(RFBI_LINE_NUMBER, line);
685 rfbi_enable_clocks(0);
686 605
687 return 0; 606 return 0;
688} 607}
689EXPORT_SYMBOL(omap_rfbi_enable_te); 608EXPORT_SYMBOL(omap_rfbi_enable_te);
690 609
691#if 0 610static int rfbi_configure(int rfbi_module, int bpp, int lines)
692static void rfbi_enable_config(int enable1, int enable2)
693{
694 u32 l;
695 int cs = 0;
696
697 if (enable1)
698 cs |= 1<<0;
699 if (enable2)
700 cs |= 1<<1;
701
702 rfbi_enable_clocks(1);
703
704 l = rfbi_read_reg(RFBI_CONTROL);
705
706 l = FLD_MOD(l, cs, 3, 2);
707 l = FLD_MOD(l, 0, 1, 1);
708
709 rfbi_write_reg(RFBI_CONTROL, l);
710
711
712 l = rfbi_read_reg(RFBI_CONFIG(0));
713 l = FLD_MOD(l, 0, 3, 2); /* TRIGGERMODE: ITE */
714 /*l |= FLD_VAL(2, 8, 7); */ /* L4FORMAT, 2pix/L4 */
715 /*l |= FLD_VAL(0, 8, 7); */ /* L4FORMAT, 1pix/L4 */
716
717 l = FLD_MOD(l, 0, 16, 16); /* A0POLARITY */
718 l = FLD_MOD(l, 1, 20, 20); /* TE_VSYNC_POLARITY */
719 l = FLD_MOD(l, 1, 21, 21); /* HSYNCPOLARITY */
720
721 l = FLD_MOD(l, OMAP_DSS_RFBI_PARALLELMODE_8, 1, 0);
722 rfbi_write_reg(RFBI_CONFIG(0), l);
723
724 rfbi_enable_clocks(0);
725}
726#endif
727
728int rfbi_configure(int rfbi_module, int bpp, int lines)
729{ 611{
730 u32 l; 612 u32 l;
731 int cycle1 = 0, cycle2 = 0, cycle3 = 0; 613 int cycle1 = 0, cycle2 = 0, cycle3 = 0;
@@ -821,8 +703,6 @@ int rfbi_configure(int rfbi_module, int bpp, int lines)
821 break; 703 break;
822 } 704 }
823 705
824 rfbi_enable_clocks(1);
825
826 REG_FLD_MOD(RFBI_CONTROL, 0, 3, 2); /* clear CS */ 706 REG_FLD_MOD(RFBI_CONTROL, 0, 3, 2); /* clear CS */
827 707
828 l = 0; 708 l = 0;
@@ -856,11 +736,15 @@ int rfbi_configure(int rfbi_module, int bpp, int lines)
856 DSSDBG("RFBI config: bpp %d, lines %d, cycles: 0x%x 0x%x 0x%x\n", 736 DSSDBG("RFBI config: bpp %d, lines %d, cycles: 0x%x 0x%x 0x%x\n",
857 bpp, lines, cycle1, cycle2, cycle3); 737 bpp, lines, cycle1, cycle2, cycle3);
858 738
859 rfbi_enable_clocks(0);
860
861 return 0; 739 return 0;
862} 740}
863EXPORT_SYMBOL(rfbi_configure); 741
742int omap_rfbi_configure(struct omap_dss_device *dssdev, int pixel_size,
743 int data_lines)
744{
745 return rfbi_configure(dssdev->phy.rfbi.channel, pixel_size, data_lines);
746}
747EXPORT_SYMBOL(omap_rfbi_configure);
864 748
865int omap_rfbi_prepare_update(struct omap_dss_device *dssdev, 749int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
866 u16 *x, u16 *y, u16 *w, u16 *h) 750 u16 *x, u16 *y, u16 *w, u16 *h)
@@ -960,6 +844,8 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
960{ 844{
961 int r; 845 int r;
962 846
847 rfbi_enable_clocks(1);
848
963 r = omap_dss_start_device(dssdev); 849 r = omap_dss_start_device(dssdev);
964 if (r) { 850 if (r) {
965 DSSERR("failed to start device\n"); 851 DSSERR("failed to start device\n");
@@ -1002,6 +888,8 @@ void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev)
1002 omap_dispc_unregister_isr(framedone_callback, NULL, 888 omap_dispc_unregister_isr(framedone_callback, NULL,
1003 DISPC_IRQ_FRAMEDONE); 889 DISPC_IRQ_FRAMEDONE);
1004 omap_dss_stop_device(dssdev); 890 omap_dss_stop_device(dssdev);
891
892 rfbi_enable_clocks(0);
1005} 893}
1006EXPORT_SYMBOL(omapdss_rfbi_display_disable); 894EXPORT_SYMBOL(omapdss_rfbi_display_disable);
1007 895
@@ -1021,11 +909,7 @@ static int omap_rfbihw_probe(struct platform_device *pdev)
1021 909
1022 rfbi.pdev = pdev; 910 rfbi.pdev = pdev;
1023 911
1024 spin_lock_init(&rfbi.cmd_lock); 912 sema_init(&rfbi.bus_lock, 1);
1025
1026 init_completion(&rfbi.cmd_done);
1027 atomic_set(&rfbi.cmd_fifo_full, 0);
1028 atomic_set(&rfbi.cmd_pending, 0);
1029 913
1030 rfbi_mem = platform_get_resource(rfbi.pdev, IORESOURCE_MEM, 0); 914 rfbi_mem = platform_get_resource(rfbi.pdev, IORESOURCE_MEM, 0);
1031 if (!rfbi_mem) { 915 if (!rfbi_mem) {
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 54a53e648180..0bd4b0350f80 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -25,7 +25,7 @@
25#include <linux/err.h> 25#include <linux/err.h>
26#include <linux/regulator/consumer.h> 26#include <linux/regulator/consumer.h>
27 27
28#include <plat/display.h> 28#include <video/omapdss.h>
29#include <plat/cpu.h> 29#include <plat/cpu.h>
30#include "dss.h" 30#include "dss.h"
31 31
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 8e35a5bae429..980f919ed987 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -34,7 +34,7 @@
34#include <linux/platform_device.h> 34#include <linux/platform_device.h>
35#include <linux/regulator/consumer.h> 35#include <linux/regulator/consumer.h>
36 36
37#include <plat/display.h> 37#include <video/omapdss.h>
38#include <plat/cpu.h> 38#include <plat/cpu.h>
39 39
40#include "dss.h" 40#include "dss.h"
@@ -373,8 +373,11 @@ static void venc_reset(void)
373 } 373 }
374 } 374 }
375 375
376#ifdef CONFIG_OMAP2_DSS_SLEEP_AFTER_VENC_RESET
376 /* the magical sleep that makes things work */ 377 /* the magical sleep that makes things work */
378 /* XXX more info? What bug this circumvents? */
377 msleep(20); 379 msleep(20);
380#endif
378} 381}
379 382
380static void venc_enable_clocks(int enable) 383static void venc_enable_clocks(int enable)
@@ -473,6 +476,12 @@ static int venc_panel_enable(struct omap_dss_device *dssdev)
473 476
474 mutex_lock(&venc.venc_lock); 477 mutex_lock(&venc.venc_lock);
475 478
479 r = omap_dss_start_device(dssdev);
480 if (r) {
481 DSSERR("failed to start device\n");
482 goto err0;
483 }
484
476 if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) { 485 if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
477 r = -EINVAL; 486 r = -EINVAL;
478 goto err1; 487 goto err1;
@@ -484,10 +493,11 @@ static int venc_panel_enable(struct omap_dss_device *dssdev)
484 493
485 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; 494 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
486 495
487 /* wait couple of vsyncs until enabling the LCD */ 496 mutex_unlock(&venc.venc_lock);
488 msleep(50); 497 return 0;
489
490err1: 498err1:
499 omap_dss_stop_device(dssdev);
500err0:
491 mutex_unlock(&venc.venc_lock); 501 mutex_unlock(&venc.venc_lock);
492 502
493 return r; 503 return r;
@@ -510,10 +520,9 @@ static void venc_panel_disable(struct omap_dss_device *dssdev)
510 520
511 venc_power_off(dssdev); 521 venc_power_off(dssdev);
512 522
513 /* wait at least 5 vsyncs after disabling the LCD */
514 msleep(100);
515
516 dssdev->state = OMAP_DSS_DISPLAY_DISABLED; 523 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
524
525 omap_dss_stop_device(dssdev);
517end: 526end:
518 mutex_unlock(&venc.venc_lock); 527 mutex_unlock(&venc.venc_lock);
519} 528}
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index 6f435450987e..cff450392b79 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -28,7 +28,7 @@
28#include <linux/omapfb.h> 28#include <linux/omapfb.h>
29#include <linux/vmalloc.h> 29#include <linux/vmalloc.h>
30 30
31#include <plat/display.h> 31#include <video/omapdss.h>
32#include <plat/vrfb.h> 32#include <plat/vrfb.h>
33#include <plat/vram.h> 33#include <plat/vram.h>
34 34
@@ -895,8 +895,16 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
895 895
896 p.display_info.xres = xres; 896 p.display_info.xres = xres;
897 p.display_info.yres = yres; 897 p.display_info.yres = yres;
898 p.display_info.width = 0; 898
899 p.display_info.height = 0; 899 if (display->driver->get_dimensions) {
900 u32 w, h;
901 display->driver->get_dimensions(display, &w, &h);
902 p.display_info.width = w;
903 p.display_info.height = h;
904 } else {
905 p.display_info.width = 0;
906 p.display_info.height = 0;
907 }
900 908
901 if (copy_to_user((void __user *)arg, &p.display_info, 909 if (copy_to_user((void __user *)arg, &p.display_info,
902 sizeof(p.display_info))) 910 sizeof(p.display_info)))
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 505ec6672049..505bc12a3031 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -30,7 +30,7 @@
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31#include <linux/omapfb.h> 31#include <linux/omapfb.h>
32 32
33#include <plat/display.h> 33#include <video/omapdss.h>
34#include <plat/vram.h> 34#include <plat/vram.h>
35#include <plat/vrfb.h> 35#include <plat/vrfb.h>
36 36
@@ -702,8 +702,16 @@ int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var)
702 var->xres, var->yres, 702 var->xres, var->yres,
703 var->xres_virtual, var->yres_virtual); 703 var->xres_virtual, var->yres_virtual);
704 704
705 var->height = -1; 705 if (display && display->driver->get_dimensions) {
706 var->width = -1; 706 u32 w, h;
707 display->driver->get_dimensions(display, &w, &h);
708 var->width = DIV_ROUND_CLOSEST(w, 1000);
709 var->height = DIV_ROUND_CLOSEST(h, 1000);
710 } else {
711 var->height = -1;
712 var->width = -1;
713 }
714
707 var->grayscale = 0; 715 var->grayscale = 0;
708 716
709 if (display && display->driver->get_timings) { 717 if (display && display->driver->get_timings) {
@@ -749,35 +757,6 @@ static int omapfb_open(struct fb_info *fbi, int user)
749 757
750static int omapfb_release(struct fb_info *fbi, int user) 758static int omapfb_release(struct fb_info *fbi, int user)
751{ 759{
752#if 0
753 struct omapfb_info *ofbi = FB2OFB(fbi);
754 struct omapfb2_device *fbdev = ofbi->fbdev;
755 struct omap_dss_device *display = fb2display(fbi);
756
757 DBG("Closing fb with plane index %d\n", ofbi->id);
758
759 omapfb_lock(fbdev);
760
761 if (display && display->get_update_mode && display->update) {
762 /* XXX this update should be removed, I think. But it's
763 * good for debugging */
764 if (display->get_update_mode(display) ==
765 OMAP_DSS_UPDATE_MANUAL) {
766 u16 w, h;
767
768 if (display->sync)
769 display->sync(display);
770
771 display->get_resolution(display, &w, &h);
772 display->update(display, 0, 0, w, h);
773 }
774 }
775
776 if (display && display->sync)
777 display->sync(display);
778
779 omapfb_unlock(fbdev);
780#endif
781 return 0; 760 return 0;
782} 761}
783 762
@@ -1263,7 +1242,6 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
1263 struct omapfb_info *ofbi = FB2OFB(fbi); 1242 struct omapfb_info *ofbi = FB2OFB(fbi);
1264 struct omapfb2_device *fbdev = ofbi->fbdev; 1243 struct omapfb2_device *fbdev = ofbi->fbdev;
1265 struct omap_dss_device *display = fb2display(fbi); 1244 struct omap_dss_device *display = fb2display(fbi);
1266 int do_update = 0;
1267 int r = 0; 1245 int r = 0;
1268 1246
1269 if (!display) 1247 if (!display)
@@ -1279,11 +1257,6 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
1279 if (display->driver->resume) 1257 if (display->driver->resume)
1280 r = display->driver->resume(display); 1258 r = display->driver->resume(display);
1281 1259
1282 if (r == 0 && display->driver->get_update_mode &&
1283 display->driver->get_update_mode(display) ==
1284 OMAP_DSS_UPDATE_MANUAL)
1285 do_update = 1;
1286
1287 break; 1260 break;
1288 1261
1289 case FB_BLANK_NORMAL: 1262 case FB_BLANK_NORMAL:
@@ -1307,13 +1280,6 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
1307exit: 1280exit:
1308 omapfb_unlock(fbdev); 1281 omapfb_unlock(fbdev);
1309 1282
1310 if (r == 0 && do_update && display->driver->update) {
1311 u16 w, h;
1312 display->driver->get_resolution(display, &w, &h);
1313
1314 r = display->driver->update(display, 0, 0, w, h);
1315 }
1316
1317 return r; 1283 return r;
1318} 1284}
1319 1285
@@ -2030,9 +1996,9 @@ static int omapfb_create_framebuffers(struct omapfb2_device *fbdev)
2030static int omapfb_mode_to_timings(const char *mode_str, 1996static int omapfb_mode_to_timings(const char *mode_str,
2031 struct omap_video_timings *timings, u8 *bpp) 1997 struct omap_video_timings *timings, u8 *bpp)
2032{ 1998{
2033 struct fb_info fbi; 1999 struct fb_info *fbi;
2034 struct fb_var_screeninfo var; 2000 struct fb_var_screeninfo *var;
2035 struct fb_ops fbops; 2001 struct fb_ops *fbops;
2036 int r; 2002 int r;
2037 2003
2038#ifdef CONFIG_OMAP2_DSS_VENC 2004#ifdef CONFIG_OMAP2_DSS_VENC
@@ -2050,39 +2016,66 @@ static int omapfb_mode_to_timings(const char *mode_str,
2050 /* this is quite a hack, but I wanted to use the modedb and for 2016 /* this is quite a hack, but I wanted to use the modedb and for
2051 * that we need fb_info and var, so we create dummy ones */ 2017 * that we need fb_info and var, so we create dummy ones */
2052 2018
2053 memset(&fbi, 0, sizeof(fbi)); 2019 *bpp = 0;
2054 memset(&var, 0, sizeof(var)); 2020 fbi = NULL;
2055 memset(&fbops, 0, sizeof(fbops)); 2021 var = NULL;
2056 fbi.fbops = &fbops; 2022 fbops = NULL;
2057
2058 r = fb_find_mode(&var, &fbi, mode_str, NULL, 0, NULL, 24);
2059
2060 if (r != 0) {
2061 timings->pixel_clock = PICOS2KHZ(var.pixclock);
2062 timings->hbp = var.left_margin;
2063 timings->hfp = var.right_margin;
2064 timings->vbp = var.upper_margin;
2065 timings->vfp = var.lower_margin;
2066 timings->hsw = var.hsync_len;
2067 timings->vsw = var.vsync_len;
2068 timings->x_res = var.xres;
2069 timings->y_res = var.yres;
2070
2071 switch (var.bits_per_pixel) {
2072 case 16:
2073 *bpp = 16;
2074 break;
2075 case 24:
2076 case 32:
2077 default:
2078 *bpp = 24;
2079 break;
2080 }
2081 2023
2082 return 0; 2024 fbi = kzalloc(sizeof(*fbi), GFP_KERNEL);
2083 } else { 2025 if (fbi == NULL) {
2084 return -EINVAL; 2026 r = -ENOMEM;
2027 goto err;
2028 }
2029
2030 var = kzalloc(sizeof(*var), GFP_KERNEL);
2031 if (var == NULL) {
2032 r = -ENOMEM;
2033 goto err;
2034 }
2035
2036 fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
2037 if (fbops == NULL) {
2038 r = -ENOMEM;
2039 goto err;
2040 }
2041
2042 fbi->fbops = fbops;
2043
2044 r = fb_find_mode(var, fbi, mode_str, NULL, 0, NULL, 24);
2045 if (r == 0) {
2046 r = -EINVAL;
2047 goto err;
2048 }
2049
2050 timings->pixel_clock = PICOS2KHZ(var->pixclock);
2051 timings->hbp = var->left_margin;
2052 timings->hfp = var->right_margin;
2053 timings->vbp = var->upper_margin;
2054 timings->vfp = var->lower_margin;
2055 timings->hsw = var->hsync_len;
2056 timings->vsw = var->vsync_len;
2057 timings->x_res = var->xres;
2058 timings->y_res = var->yres;
2059
2060 switch (var->bits_per_pixel) {
2061 case 16:
2062 *bpp = 16;
2063 break;
2064 case 24:
2065 case 32:
2066 default:
2067 *bpp = 24;
2068 break;
2085 } 2069 }
2070
2071 r = 0;
2072
2073err:
2074 kfree(fbi);
2075 kfree(var);
2076 kfree(fbops);
2077
2078 return r;
2086} 2079}
2087 2080
2088static int omapfb_set_def_mode(struct omapfb2_device *fbdev, 2081static int omapfb_set_def_mode(struct omapfb2_device *fbdev,
@@ -2185,6 +2178,61 @@ static int omapfb_parse_def_modes(struct omapfb2_device *fbdev)
2185 return r; 2178 return r;
2186} 2179}
2187 2180
2181static int omapfb_init_display(struct omapfb2_device *fbdev,
2182 struct omap_dss_device *dssdev)
2183{
2184 struct omap_dss_driver *dssdrv = dssdev->driver;
2185 int r;
2186
2187 r = dssdrv->enable(dssdev);
2188 if (r) {
2189 dev_warn(fbdev->dev, "Failed to enable display '%s'\n",
2190 dssdev->name);
2191 return r;
2192 }
2193
2194 if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
2195 u16 w, h;
2196 if (dssdrv->enable_te) {
2197 r = dssdrv->enable_te(dssdev, 1);
2198 if (r) {
2199 dev_err(fbdev->dev, "Failed to set TE\n");
2200 return r;
2201 }
2202 }
2203
2204 if (dssdrv->set_update_mode) {
2205 r = dssdrv->set_update_mode(dssdev,
2206 OMAP_DSS_UPDATE_MANUAL);
2207 if (r) {
2208 dev_err(fbdev->dev,
2209 "Failed to set update mode\n");
2210 return r;
2211 }
2212 }
2213
2214 dssdrv->get_resolution(dssdev, &w, &h);
2215 r = dssdrv->update(dssdev, 0, 0, w, h);
2216 if (r) {
2217 dev_err(fbdev->dev,
2218 "Failed to update display\n");
2219 return r;
2220 }
2221 } else {
2222 if (dssdrv->set_update_mode) {
2223 r = dssdrv->set_update_mode(dssdev,
2224 OMAP_DSS_UPDATE_AUTO);
2225 if (r) {
2226 dev_err(fbdev->dev,
2227 "Failed to set update mode\n");
2228 return r;
2229 }
2230 }
2231 }
2232
2233 return 0;
2234}
2235
2188static int omapfb_probe(struct platform_device *pdev) 2236static int omapfb_probe(struct platform_device *pdev)
2189{ 2237{
2190 struct omapfb2_device *fbdev = NULL; 2238 struct omapfb2_device *fbdev = NULL;
@@ -2284,30 +2332,13 @@ static int omapfb_probe(struct platform_device *pdev)
2284 } 2332 }
2285 2333
2286 if (def_display) { 2334 if (def_display) {
2287 struct omap_dss_driver *dssdrv = def_display->driver; 2335 r = omapfb_init_display(fbdev, def_display);
2288
2289 r = def_display->driver->enable(def_display);
2290 if (r) { 2336 if (r) {
2291 dev_warn(fbdev->dev, "Failed to enable display '%s'\n", 2337 dev_err(fbdev->dev,
2292 def_display->name); 2338 "failed to initialize default "
2339 "display\n");
2293 goto cleanup; 2340 goto cleanup;
2294 } 2341 }
2295
2296 if (def_display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
2297 u16 w, h;
2298 if (dssdrv->enable_te)
2299 dssdrv->enable_te(def_display, 1);
2300 if (dssdrv->set_update_mode)
2301 dssdrv->set_update_mode(def_display,
2302 OMAP_DSS_UPDATE_MANUAL);
2303
2304 dssdrv->get_resolution(def_display, &w, &h);
2305 def_display->driver->update(def_display, 0, 0, w, h);
2306 } else {
2307 if (dssdrv->set_update_mode)
2308 dssdrv->set_update_mode(def_display,
2309 OMAP_DSS_UPDATE_AUTO);
2310 }
2311 } 2342 }
2312 2343
2313 DBG("create sysfs for fbs\n"); 2344 DBG("create sysfs for fbs\n");
diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c
index 6f9c72cd6bb0..2f5e817b2a9a 100644
--- a/drivers/video/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c
@@ -29,7 +29,7 @@
29#include <linux/mm.h> 29#include <linux/mm.h>
30#include <linux/omapfb.h> 30#include <linux/omapfb.h>
31 31
32#include <plat/display.h> 32#include <video/omapdss.h>
33#include <plat/vrfb.h> 33#include <plat/vrfb.h>
34 34
35#include "omapfb.h" 35#include "omapfb.h"
@@ -50,10 +50,12 @@ static ssize_t store_rotate_type(struct device *dev,
50 struct fb_info *fbi = dev_get_drvdata(dev); 50 struct fb_info *fbi = dev_get_drvdata(dev);
51 struct omapfb_info *ofbi = FB2OFB(fbi); 51 struct omapfb_info *ofbi = FB2OFB(fbi);
52 struct omapfb2_mem_region *rg; 52 struct omapfb2_mem_region *rg;
53 enum omap_dss_rotation_type rot_type; 53 int rot_type;
54 int r; 54 int r;
55 55
56 rot_type = simple_strtoul(buf, NULL, 0); 56 r = kstrtoint(buf, 0, &rot_type);
57 if (r)
58 return r;
57 59
58 if (rot_type != OMAP_DSS_ROT_DMA && rot_type != OMAP_DSS_ROT_VRFB) 60 if (rot_type != OMAP_DSS_ROT_DMA && rot_type != OMAP_DSS_ROT_VRFB)
59 return -EINVAL; 61 return -EINVAL;
@@ -102,14 +104,15 @@ static ssize_t store_mirror(struct device *dev,
102{ 104{
103 struct fb_info *fbi = dev_get_drvdata(dev); 105 struct fb_info *fbi = dev_get_drvdata(dev);
104 struct omapfb_info *ofbi = FB2OFB(fbi); 106 struct omapfb_info *ofbi = FB2OFB(fbi);
105 unsigned long mirror; 107 int mirror;
106 int r; 108 int r;
107 struct fb_var_screeninfo new_var; 109 struct fb_var_screeninfo new_var;
108 110
109 mirror = simple_strtoul(buf, NULL, 0); 111 r = kstrtoint(buf, 0, &mirror);
112 if (r)
113 return r;
110 114
111 if (mirror != 0 && mirror != 1) 115 mirror = !!mirror;
112 return -EINVAL;
113 116
114 if (!lock_fb_info(fbi)) 117 if (!lock_fb_info(fbi))
115 return -ENODEV; 118 return -ENODEV;
@@ -445,7 +448,11 @@ static ssize_t store_size(struct device *dev, struct device_attribute *attr,
445 int r; 448 int r;
446 int i; 449 int i;
447 450
448 size = PAGE_ALIGN(simple_strtoul(buf, NULL, 0)); 451 r = kstrtoul(buf, 0, &size);
452 if (r)
453 return r;
454
455 size = PAGE_ALIGN(size);
449 456
450 if (!lock_fb_info(fbi)) 457 if (!lock_fb_info(fbi))
451 return -ENODEV; 458 return -ENODEV;
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
index 1305fc9880ba..aa1b1d974276 100644
--- a/drivers/video/omap2/omapfb/omapfb.h
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -29,13 +29,15 @@
29 29
30#include <linux/rwsem.h> 30#include <linux/rwsem.h>
31 31
32#include <plat/display.h> 32#include <video/omapdss.h>
33 33
34#ifdef DEBUG 34#ifdef DEBUG
35extern unsigned int omapfb_debug; 35extern unsigned int omapfb_debug;
36#define DBG(format, ...) \ 36#define DBG(format, ...) \
37 if (omapfb_debug) \ 37 do { \
38 printk(KERN_DEBUG "OMAPFB: " format, ## __VA_ARGS__) 38 if (omapfb_debug) \
39 printk(KERN_DEBUG "OMAPFB: " format, ## __VA_ARGS__); \
40 } while (0)
39#else 41#else
40#define DBG(format, ...) 42#define DBG(format, ...)
41#endif 43#endif
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 3b6cdcac8f1a..0352afa49a39 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -182,6 +182,7 @@ struct s3c_fb_vsync {
182 182
183/** 183/**
184 * struct s3c_fb - overall hardware state of the hardware 184 * struct s3c_fb - overall hardware state of the hardware
185 * @slock: The spinlock protection for this data sturcture.
185 * @dev: The device that we bound to, for printing, etc. 186 * @dev: The device that we bound to, for printing, etc.
186 * @regs_res: The resource we claimed for the IO registers. 187 * @regs_res: The resource we claimed for the IO registers.
187 * @bus_clk: The clk (hclk) feeding our interface and possibly pixclk. 188 * @bus_clk: The clk (hclk) feeding our interface and possibly pixclk.
@@ -195,6 +196,7 @@ struct s3c_fb_vsync {
195 * @vsync_info: VSYNC-related information (count, queues...) 196 * @vsync_info: VSYNC-related information (count, queues...)
196 */ 197 */
197struct s3c_fb { 198struct s3c_fb {
199 spinlock_t slock;
198 struct device *dev; 200 struct device *dev;
199 struct resource *regs_res; 201 struct resource *regs_res;
200 struct clk *bus_clk; 202 struct clk *bus_clk;
@@ -300,6 +302,7 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
300 var->blue.length = 5; 302 var->blue.length = 5;
301 break; 303 break;
302 304
305 case 32:
303 case 28: 306 case 28:
304 case 25: 307 case 25:
305 var->transp.length = var->bits_per_pixel - 24; 308 var->transp.length = var->bits_per_pixel - 24;
@@ -308,7 +311,6 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
308 case 24: 311 case 24:
309 /* our 24bpp is unpacked, so 32bpp */ 312 /* our 24bpp is unpacked, so 32bpp */
310 var->bits_per_pixel = 32; 313 var->bits_per_pixel = 32;
311 case 32:
312 var->red.offset = 16; 314 var->red.offset = 16;
313 var->red.length = 8; 315 var->red.length = 8;
314 var->green.offset = 8; 316 var->green.offset = 8;
@@ -947,6 +949,8 @@ static irqreturn_t s3c_fb_irq(int irq, void *dev_id)
947 void __iomem *regs = sfb->regs; 949 void __iomem *regs = sfb->regs;
948 u32 irq_sts_reg; 950 u32 irq_sts_reg;
949 951
952 spin_lock(&sfb->slock);
953
950 irq_sts_reg = readl(regs + VIDINTCON1); 954 irq_sts_reg = readl(regs + VIDINTCON1);
951 955
952 if (irq_sts_reg & VIDINTCON1_INT_FRAME) { 956 if (irq_sts_reg & VIDINTCON1_INT_FRAME) {
@@ -963,6 +967,7 @@ static irqreturn_t s3c_fb_irq(int irq, void *dev_id)
963 */ 967 */
964 s3c_fb_disable_irq(sfb); 968 s3c_fb_disable_irq(sfb);
965 969
970 spin_unlock(&sfb->slock);
966 return IRQ_HANDLED; 971 return IRQ_HANDLED;
967} 972}
968 973
@@ -1339,6 +1344,8 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
1339 sfb->pdata = pd; 1344 sfb->pdata = pd;
1340 sfb->variant = fbdrv->variant; 1345 sfb->variant = fbdrv->variant;
1341 1346
1347 spin_lock_init(&sfb->slock);
1348
1342 sfb->bus_clk = clk_get(dev, "lcd"); 1349 sfb->bus_clk = clk_get(dev, "lcd");
1343 if (IS_ERR(sfb->bus_clk)) { 1350 if (IS_ERR(sfb->bus_clk)) {
1344 dev_err(dev, "failed to get bus clock\n"); 1351 dev_err(dev, "failed to get bus clock\n");
@@ -1442,8 +1449,7 @@ err_ioremap:
1442 iounmap(sfb->regs); 1449 iounmap(sfb->regs);
1443 1450
1444err_req_region: 1451err_req_region:
1445 release_resource(sfb->regs_res); 1452 release_mem_region(sfb->regs_res->start, resource_size(sfb->regs_res));
1446 kfree(sfb->regs_res);
1447 1453
1448err_clk: 1454err_clk:
1449 clk_disable(sfb->bus_clk); 1455 clk_disable(sfb->bus_clk);
@@ -1479,8 +1485,7 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev)
1479 clk_disable(sfb->bus_clk); 1485 clk_disable(sfb->bus_clk);
1480 clk_put(sfb->bus_clk); 1486 clk_put(sfb->bus_clk);
1481 1487
1482 release_resource(sfb->regs_res); 1488 release_mem_region(sfb->regs_res->start, resource_size(sfb->regs_res));
1483 kfree(sfb->regs_res);
1484 1489
1485 kfree(sfb); 1490 kfree(sfb);
1486 1491
@@ -1521,7 +1526,8 @@ static int s3c_fb_resume(struct device *dev)
1521 1526
1522 clk_enable(sfb->bus_clk); 1527 clk_enable(sfb->bus_clk);
1523 1528
1524 /* setup registers */ 1529 /* setup gpio and output polarity controls */
1530 pd->setup_gpio();
1525 writel(pd->vidcon1, sfb->regs + VIDCON1); 1531 writel(pd->vidcon1, sfb->regs + VIDCON1);
1526 1532
1527 /* zero all windows before we do anything */ 1533 /* zero all windows before we do anything */
@@ -1549,7 +1555,7 @@ static int s3c_fb_resume(struct device *dev)
1549 return 0; 1555 return 0;
1550} 1556}
1551 1557
1552int s3c_fb_runtime_suspend(struct device *dev) 1558static int s3c_fb_runtime_suspend(struct device *dev)
1553{ 1559{
1554 struct platform_device *pdev = to_platform_device(dev); 1560 struct platform_device *pdev = to_platform_device(dev);
1555 struct s3c_fb *sfb = platform_get_drvdata(pdev); 1561 struct s3c_fb *sfb = platform_get_drvdata(pdev);
@@ -1569,7 +1575,7 @@ int s3c_fb_runtime_suspend(struct device *dev)
1569 return 0; 1575 return 0;
1570} 1576}
1571 1577
1572int s3c_fb_runtime_resume(struct device *dev) 1578static int s3c_fb_runtime_resume(struct device *dev)
1573{ 1579{
1574 struct platform_device *pdev = to_platform_device(dev); 1580 struct platform_device *pdev = to_platform_device(dev);
1575 struct s3c_fb *sfb = platform_get_drvdata(pdev); 1581 struct s3c_fb *sfb = platform_get_drvdata(pdev);
@@ -1579,7 +1585,8 @@ int s3c_fb_runtime_resume(struct device *dev)
1579 1585
1580 clk_enable(sfb->bus_clk); 1586 clk_enable(sfb->bus_clk);
1581 1587
1582 /* setup registers */ 1588 /* setup gpio and output polarity controls */
1589 pd->setup_gpio();
1583 writel(pd->vidcon1, sfb->regs + VIDCON1); 1590 writel(pd->vidcon1, sfb->regs + VIDCON1);
1584 1591
1585 /* zero all windows before we do anything */ 1592 /* zero all windows before we do anything */
@@ -1623,28 +1630,31 @@ static struct s3c_fb_win_variant s3c_fb_data_64xx_wins[] = {
1623 .has_osd_c = 1, 1630 .has_osd_c = 1,
1624 .osd_size_off = 0x8, 1631 .osd_size_off = 0x8,
1625 .palette_sz = 256, 1632 .palette_sz = 256,
1626 .valid_bpp = VALID_BPP1248 | VALID_BPP(16) | VALID_BPP(24), 1633 .valid_bpp = (VALID_BPP1248 | VALID_BPP(16) |
1634 VALID_BPP(18) | VALID_BPP(24)),
1627 }, 1635 },
1628 [1] = { 1636 [1] = {
1629 .has_osd_c = 1, 1637 .has_osd_c = 1,
1630 .has_osd_d = 1, 1638 .has_osd_d = 1,
1631 .osd_size_off = 0x12, 1639 .osd_size_off = 0xc,
1632 .has_osd_alpha = 1, 1640 .has_osd_alpha = 1,
1633 .palette_sz = 256, 1641 .palette_sz = 256,
1634 .valid_bpp = (VALID_BPP1248 | VALID_BPP(16) | 1642 .valid_bpp = (VALID_BPP1248 | VALID_BPP(16) |
1635 VALID_BPP(18) | VALID_BPP(19) | 1643 VALID_BPP(18) | VALID_BPP(19) |
1636 VALID_BPP(24) | VALID_BPP(25)), 1644 VALID_BPP(24) | VALID_BPP(25) |
1645 VALID_BPP(28)),
1637 }, 1646 },
1638 [2] = { 1647 [2] = {
1639 .has_osd_c = 1, 1648 .has_osd_c = 1,
1640 .has_osd_d = 1, 1649 .has_osd_d = 1,
1641 .osd_size_off = 0x12, 1650 .osd_size_off = 0xc,
1642 .has_osd_alpha = 1, 1651 .has_osd_alpha = 1,
1643 .palette_sz = 16, 1652 .palette_sz = 16,
1644 .palette_16bpp = 1, 1653 .palette_16bpp = 1,
1645 .valid_bpp = (VALID_BPP1248 | VALID_BPP(16) | 1654 .valid_bpp = (VALID_BPP1248 | VALID_BPP(16) |
1646 VALID_BPP(18) | VALID_BPP(19) | 1655 VALID_BPP(18) | VALID_BPP(19) |
1647 VALID_BPP(24) | VALID_BPP(25)), 1656 VALID_BPP(24) | VALID_BPP(25) |
1657 VALID_BPP(28)),
1648 }, 1658 },
1649 [3] = { 1659 [3] = {
1650 .has_osd_c = 1, 1660 .has_osd_c = 1,
@@ -1653,7 +1663,8 @@ static struct s3c_fb_win_variant s3c_fb_data_64xx_wins[] = {
1653 .palette_16bpp = 1, 1663 .palette_16bpp = 1,
1654 .valid_bpp = (VALID_BPP124 | VALID_BPP(16) | 1664 .valid_bpp = (VALID_BPP124 | VALID_BPP(16) |
1655 VALID_BPP(18) | VALID_BPP(19) | 1665 VALID_BPP(18) | VALID_BPP(19) |
1656 VALID_BPP(24) | VALID_BPP(25)), 1666 VALID_BPP(24) | VALID_BPP(25) |
1667 VALID_BPP(28)),
1657 }, 1668 },
1658 [4] = { 1669 [4] = {
1659 .has_osd_c = 1, 1670 .has_osd_c = 1,
@@ -1662,7 +1673,65 @@ static struct s3c_fb_win_variant s3c_fb_data_64xx_wins[] = {
1662 .palette_16bpp = 1, 1673 .palette_16bpp = 1,
1663 .valid_bpp = (VALID_BPP(1) | VALID_BPP(2) | 1674 .valid_bpp = (VALID_BPP(1) | VALID_BPP(2) |
1664 VALID_BPP(16) | VALID_BPP(18) | 1675 VALID_BPP(16) | VALID_BPP(18) |
1665 VALID_BPP(24) | VALID_BPP(25)), 1676 VALID_BPP(19) | VALID_BPP(24) |
1677 VALID_BPP(25) | VALID_BPP(28)),
1678 },
1679};
1680
1681static struct s3c_fb_win_variant s3c_fb_data_s5p_wins[] = {
1682 [0] = {
1683 .has_osd_c = 1,
1684 .osd_size_off = 0x8,
1685 .palette_sz = 256,
1686 .valid_bpp = (VALID_BPP1248 | VALID_BPP(13) |
1687 VALID_BPP(15) | VALID_BPP(16) |
1688 VALID_BPP(18) | VALID_BPP(19) |
1689 VALID_BPP(24) | VALID_BPP(25) |
1690 VALID_BPP(32)),
1691 },
1692 [1] = {
1693 .has_osd_c = 1,
1694 .has_osd_d = 1,
1695 .osd_size_off = 0xc,
1696 .has_osd_alpha = 1,
1697 .palette_sz = 256,
1698 .valid_bpp = (VALID_BPP1248 | VALID_BPP(13) |
1699 VALID_BPP(15) | VALID_BPP(16) |
1700 VALID_BPP(18) | VALID_BPP(19) |
1701 VALID_BPP(24) | VALID_BPP(25) |
1702 VALID_BPP(32)),
1703 },
1704 [2] = {
1705 .has_osd_c = 1,
1706 .has_osd_d = 1,
1707 .osd_size_off = 0xc,
1708 .has_osd_alpha = 1,
1709 .palette_sz = 256,
1710 .valid_bpp = (VALID_BPP1248 | VALID_BPP(13) |
1711 VALID_BPP(15) | VALID_BPP(16) |
1712 VALID_BPP(18) | VALID_BPP(19) |
1713 VALID_BPP(24) | VALID_BPP(25) |
1714 VALID_BPP(32)),
1715 },
1716 [3] = {
1717 .has_osd_c = 1,
1718 .has_osd_alpha = 1,
1719 .palette_sz = 256,
1720 .valid_bpp = (VALID_BPP1248 | VALID_BPP(13) |
1721 VALID_BPP(15) | VALID_BPP(16) |
1722 VALID_BPP(18) | VALID_BPP(19) |
1723 VALID_BPP(24) | VALID_BPP(25) |
1724 VALID_BPP(32)),
1725 },
1726 [4] = {
1727 .has_osd_c = 1,
1728 .has_osd_alpha = 1,
1729 .palette_sz = 256,
1730 .valid_bpp = (VALID_BPP1248 | VALID_BPP(13) |
1731 VALID_BPP(15) | VALID_BPP(16) |
1732 VALID_BPP(18) | VALID_BPP(19) |
1733 VALID_BPP(24) | VALID_BPP(25) |
1734 VALID_BPP(32)),
1666 }, 1735 },
1667}; 1736};
1668 1737
@@ -1719,11 +1788,11 @@ static struct s3c_fb_driverdata s3c_fb_data_s5pc100 = {
1719 1788
1720 .has_prtcon = 1, 1789 .has_prtcon = 1,
1721 }, 1790 },
1722 .win[0] = &s3c_fb_data_64xx_wins[0], 1791 .win[0] = &s3c_fb_data_s5p_wins[0],
1723 .win[1] = &s3c_fb_data_64xx_wins[1], 1792 .win[1] = &s3c_fb_data_s5p_wins[1],
1724 .win[2] = &s3c_fb_data_64xx_wins[2], 1793 .win[2] = &s3c_fb_data_s5p_wins[2],
1725 .win[3] = &s3c_fb_data_64xx_wins[3], 1794 .win[3] = &s3c_fb_data_s5p_wins[3],
1726 .win[4] = &s3c_fb_data_64xx_wins[4], 1795 .win[4] = &s3c_fb_data_s5p_wins[4],
1727}; 1796};
1728 1797
1729static struct s3c_fb_driverdata s3c_fb_data_s5pv210 = { 1798static struct s3c_fb_driverdata s3c_fb_data_s5pv210 = {
@@ -1749,11 +1818,11 @@ static struct s3c_fb_driverdata s3c_fb_data_s5pv210 = {
1749 1818
1750 .has_shadowcon = 1, 1819 .has_shadowcon = 1,
1751 }, 1820 },
1752 .win[0] = &s3c_fb_data_64xx_wins[0], 1821 .win[0] = &s3c_fb_data_s5p_wins[0],
1753 .win[1] = &s3c_fb_data_64xx_wins[1], 1822 .win[1] = &s3c_fb_data_s5p_wins[1],
1754 .win[2] = &s3c_fb_data_64xx_wins[2], 1823 .win[2] = &s3c_fb_data_s5p_wins[2],
1755 .win[3] = &s3c_fb_data_64xx_wins[3], 1824 .win[3] = &s3c_fb_data_s5p_wins[3],
1756 .win[4] = &s3c_fb_data_64xx_wins[4], 1825 .win[4] = &s3c_fb_data_s5p_wins[4],
1757}; 1826};
1758 1827
1759/* S3C2443/S3C2416 style hardware */ 1828/* S3C2443/S3C2416 style hardware */
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index 61c819e35f7f..0aa13761de6e 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -867,7 +867,7 @@ static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
867 goto dealloc_fb; 867 goto dealloc_fb;
868 } 868 }
869 869
870 size = (res->end - res->start) + 1; 870 size = resource_size(res);
871 info->mem = request_mem_region(res->start, size, pdev->name); 871 info->mem = request_mem_region(res->start, size, pdev->name);
872 if (info->mem == NULL) { 872 if (info->mem == NULL) {
873 dev_err(&pdev->dev, "failed to get memory region\n"); 873 dev_err(&pdev->dev, "failed to get memory region\n");
@@ -997,8 +997,7 @@ release_irq:
997release_regs: 997release_regs:
998 iounmap(info->io); 998 iounmap(info->io);
999release_mem: 999release_mem:
1000 release_resource(info->mem); 1000 release_mem_region(res->start, size);
1001 kfree(info->mem);
1002dealloc_fb: 1001dealloc_fb:
1003 platform_set_drvdata(pdev, NULL); 1002 platform_set_drvdata(pdev, NULL);
1004 framebuffer_release(fbinfo); 1003 framebuffer_release(fbinfo);
@@ -1044,8 +1043,7 @@ static int __devexit s3c2410fb_remove(struct platform_device *pdev)
1044 1043
1045 iounmap(info->io); 1044 iounmap(info->io);
1046 1045
1047 release_resource(info->mem); 1046 release_mem_region(info->mem->start, resource_size(info->mem));
1048 kfree(info->mem);
1049 1047
1050 platform_set_drvdata(pdev, NULL); 1048 platform_set_drvdata(pdev, NULL);
1051 framebuffer_release(fbinfo); 1049 framebuffer_release(fbinfo);
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index c4482f2e5799..4ca5d0c8fe84 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -25,6 +25,9 @@
25#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */ 25#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
26#include <video/vga.h> 26#include <video/vga.h>
27 27
28#include <linux/i2c.h>
29#include <linux/i2c-algo-bit.h>
30
28#ifdef CONFIG_MTRR 31#ifdef CONFIG_MTRR
29#include <asm/mtrr.h> 32#include <asm/mtrr.h>
30#endif 33#endif
@@ -36,6 +39,12 @@ struct s3fb_info {
36 struct mutex open_lock; 39 struct mutex open_lock;
37 unsigned int ref_count; 40 unsigned int ref_count;
38 u32 pseudo_palette[16]; 41 u32 pseudo_palette[16];
42#ifdef CONFIG_FB_S3_DDC
43 u8 __iomem *mmio;
44 bool ddc_registered;
45 struct i2c_adapter ddc_adapter;
46 struct i2c_algo_bit_data ddc_algo;
47#endif
39}; 48};
40 49
41 50
@@ -105,6 +114,9 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64",
105#define CHIP_UNDECIDED_FLAG 0x80 114#define CHIP_UNDECIDED_FLAG 0x80
106#define CHIP_MASK 0xFF 115#define CHIP_MASK 0xFF
107 116
117#define MMIO_OFFSET 0x1000000
118#define MMIO_SIZE 0x10000
119
108/* CRT timing register sets */ 120/* CRT timing register sets */
109 121
110static const struct vga_regset s3_h_total_regs[] = {{0x00, 0, 7}, {0x5D, 0, 0}, VGA_REGSET_END}; 122static const struct vga_regset s3_h_total_regs[] = {{0x00, 0, 7}, {0x5D, 0, 0}, VGA_REGSET_END};
@@ -140,7 +152,7 @@ static const struct svga_timing_regs s3_timing_regs = {
140/* Module parameters */ 152/* Module parameters */
141 153
142 154
143static char *mode_option __devinitdata = "640x480-8@60"; 155static char *mode_option __devinitdata;
144 156
145#ifdef CONFIG_MTRR 157#ifdef CONFIG_MTRR
146static int mtrr __devinitdata = 1; 158static int mtrr __devinitdata = 1;
@@ -169,6 +181,119 @@ MODULE_PARM_DESC(fasttext, "Enable S3 fast text mode (1=enable, 0=disable, defau
169 181
170/* ------------------------------------------------------------------------- */ 182/* ------------------------------------------------------------------------- */
171 183
184#ifdef CONFIG_FB_S3_DDC
185
186#define DDC_REG 0xaa /* Trio 3D/1X/2X */
187#define DDC_MMIO_REG 0xff20 /* all other chips */
188#define DDC_SCL_OUT (1 << 0)
189#define DDC_SDA_OUT (1 << 1)
190#define DDC_SCL_IN (1 << 2)
191#define DDC_SDA_IN (1 << 3)
192#define DDC_DRIVE_EN (1 << 4)
193
194static bool s3fb_ddc_needs_mmio(int chip)
195{
196 return !(chip == CHIP_360_TRIO3D_1X ||
197 chip == CHIP_362_TRIO3D_2X ||
198 chip == CHIP_368_TRIO3D_2X);
199}
200
201static u8 s3fb_ddc_read(struct s3fb_info *par)
202{
203 if (s3fb_ddc_needs_mmio(par->chip))
204 return readb(par->mmio + DDC_MMIO_REG);
205 else
206 return vga_rcrt(par->state.vgabase, DDC_REG);
207}
208
209static void s3fb_ddc_write(struct s3fb_info *par, u8 val)
210{
211 if (s3fb_ddc_needs_mmio(par->chip))
212 writeb(val, par->mmio + DDC_MMIO_REG);
213 else
214 vga_wcrt(par->state.vgabase, DDC_REG, val);
215}
216
217static void s3fb_ddc_setscl(void *data, int val)
218{
219 struct s3fb_info *par = data;
220 unsigned char reg;
221
222 reg = s3fb_ddc_read(par) | DDC_DRIVE_EN;
223 if (val)
224 reg |= DDC_SCL_OUT;
225 else
226 reg &= ~DDC_SCL_OUT;
227 s3fb_ddc_write(par, reg);
228}
229
230static void s3fb_ddc_setsda(void *data, int val)
231{
232 struct s3fb_info *par = data;
233 unsigned char reg;
234
235 reg = s3fb_ddc_read(par) | DDC_DRIVE_EN;
236 if (val)
237 reg |= DDC_SDA_OUT;
238 else
239 reg &= ~DDC_SDA_OUT;
240 s3fb_ddc_write(par, reg);
241}
242
243static int s3fb_ddc_getscl(void *data)
244{
245 struct s3fb_info *par = data;
246
247 return !!(s3fb_ddc_read(par) & DDC_SCL_IN);
248}
249
250static int s3fb_ddc_getsda(void *data)
251{
252 struct s3fb_info *par = data;
253
254 return !!(s3fb_ddc_read(par) & DDC_SDA_IN);
255}
256
257static int __devinit s3fb_setup_ddc_bus(struct fb_info *info)
258{
259 struct s3fb_info *par = info->par;
260
261 strlcpy(par->ddc_adapter.name, info->fix.id,
262 sizeof(par->ddc_adapter.name));
263 par->ddc_adapter.owner = THIS_MODULE;
264 par->ddc_adapter.class = I2C_CLASS_DDC;
265 par->ddc_adapter.algo_data = &par->ddc_algo;
266 par->ddc_adapter.dev.parent = info->device;
267 par->ddc_algo.setsda = s3fb_ddc_setsda;
268 par->ddc_algo.setscl = s3fb_ddc_setscl;
269 par->ddc_algo.getsda = s3fb_ddc_getsda;
270 par->ddc_algo.getscl = s3fb_ddc_getscl;
271 par->ddc_algo.udelay = 10;
272 par->ddc_algo.timeout = 20;
273 par->ddc_algo.data = par;
274
275 i2c_set_adapdata(&par->ddc_adapter, par);
276
277 /*
278 * some Virge cards have external MUX to switch chip I2C bus between
279 * DDC and extension pins - switch it do DDC
280 */
281/* vga_wseq(par->state.vgabase, 0x08, 0x06); - not needed, already unlocked */
282 if (par->chip == CHIP_357_VIRGE_GX2 ||
283 par->chip == CHIP_359_VIRGE_GX2P)
284 svga_wseq_mask(par->state.vgabase, 0x0d, 0x01, 0x03);
285 else
286 svga_wseq_mask(par->state.vgabase, 0x0d, 0x00, 0x03);
287 /* some Virge need this or the DDC is ignored */
288 svga_wcrt_mask(par->state.vgabase, 0x5c, 0x03, 0x03);
289
290 return i2c_bit_add_bus(&par->ddc_adapter);
291}
292#endif /* CONFIG_FB_S3_DDC */
293
294
295/* ------------------------------------------------------------------------- */
296
172/* Set font in S3 fast text mode */ 297/* Set font in S3 fast text mode */
173 298
174static void s3fb_settile_fast(struct fb_info *info, struct fb_tilemap *map) 299static void s3fb_settile_fast(struct fb_info *info, struct fb_tilemap *map)
@@ -994,6 +1119,7 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
994 struct s3fb_info *par; 1119 struct s3fb_info *par;
995 int rc; 1120 int rc;
996 u8 regval, cr38, cr39; 1121 u8 regval, cr38, cr39;
1122 bool found = false;
997 1123
998 /* Ignore secondary VGA device because there is no VGA arbitration */ 1124 /* Ignore secondary VGA device because there is no VGA arbitration */
999 if (! svga_primary_device(dev)) { 1125 if (! svga_primary_device(dev)) {
@@ -1110,12 +1236,69 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
1110 info->fix.ypanstep = 0; 1236 info->fix.ypanstep = 0;
1111 info->fix.accel = FB_ACCEL_NONE; 1237 info->fix.accel = FB_ACCEL_NONE;
1112 info->pseudo_palette = (void*) (par->pseudo_palette); 1238 info->pseudo_palette = (void*) (par->pseudo_palette);
1239 info->var.bits_per_pixel = 8;
1240
1241#ifdef CONFIG_FB_S3_DDC
1242 /* Enable MMIO if needed */
1243 if (s3fb_ddc_needs_mmio(par->chip)) {
1244 par->mmio = ioremap(info->fix.smem_start + MMIO_OFFSET, MMIO_SIZE);
1245 if (par->mmio)
1246 svga_wcrt_mask(par->state.vgabase, 0x53, 0x08, 0x08); /* enable MMIO */
1247 else
1248 dev_err(info->device, "unable to map MMIO at 0x%lx, disabling DDC",
1249 info->fix.smem_start + MMIO_OFFSET);
1250 }
1251 if (!s3fb_ddc_needs_mmio(par->chip) || par->mmio)
1252 if (s3fb_setup_ddc_bus(info) == 0) {
1253 u8 *edid = fb_ddc_read(&par->ddc_adapter);
1254 par->ddc_registered = true;
1255 if (edid) {
1256 fb_edid_to_monspecs(edid, &info->monspecs);
1257 kfree(edid);
1258 if (!info->monspecs.modedb)
1259 dev_err(info->device, "error getting mode database\n");
1260 else {
1261 const struct fb_videomode *m;
1262
1263 fb_videomode_to_modelist(info->monspecs.modedb,
1264 info->monspecs.modedb_len,
1265 &info->modelist);
1266 m = fb_find_best_display(&info->monspecs, &info->modelist);
1267 if (m) {
1268 fb_videomode_to_var(&info->var, m);
1269 /* fill all other info->var's fields */
1270 if (s3fb_check_var(&info->var, info) == 0)
1271 found = true;
1272 }
1273 }
1274 }
1275 }
1276#endif
1277 if (!mode_option && !found)
1278 mode_option = "640x480-8@60";
1113 1279
1114 /* Prepare startup mode */ 1280 /* Prepare startup mode */
1115 rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8); 1281 if (mode_option) {
1116 if (! ((rc == 1) || (rc == 2))) { 1282 rc = fb_find_mode(&info->var, info, mode_option,
1117 rc = -EINVAL; 1283 info->monspecs.modedb, info->monspecs.modedb_len,
1118 dev_err(info->device, "mode %s not found\n", mode_option); 1284 NULL, info->var.bits_per_pixel);
1285 if (!rc || rc == 4) {
1286 rc = -EINVAL;
1287 dev_err(info->device, "mode %s not found\n", mode_option);
1288 fb_destroy_modedb(info->monspecs.modedb);
1289 info->monspecs.modedb = NULL;
1290 goto err_find_mode;
1291 }
1292 }
1293
1294 fb_destroy_modedb(info->monspecs.modedb);
1295 info->monspecs.modedb = NULL;
1296
1297 /* maximize virtual vertical size for fast scrolling */
1298 info->var.yres_virtual = info->fix.smem_len * 8 /
1299 (info->var.bits_per_pixel * info->var.xres_virtual);
1300 if (info->var.yres_virtual < info->var.yres) {
1301 dev_err(info->device, "virtual vertical size smaller than real\n");
1119 goto err_find_mode; 1302 goto err_find_mode;
1120 } 1303 }
1121 1304
@@ -1164,6 +1347,12 @@ err_reg_fb:
1164 fb_dealloc_cmap(&info->cmap); 1347 fb_dealloc_cmap(&info->cmap);
1165err_alloc_cmap: 1348err_alloc_cmap:
1166err_find_mode: 1349err_find_mode:
1350#ifdef CONFIG_FB_S3_DDC
1351 if (par->ddc_registered)
1352 i2c_del_adapter(&par->ddc_adapter);
1353 if (par->mmio)
1354 iounmap(par->mmio);
1355#endif
1167 pci_iounmap(dev, info->screen_base); 1356 pci_iounmap(dev, info->screen_base);
1168err_iomap: 1357err_iomap:
1169 pci_release_regions(dev); 1358 pci_release_regions(dev);
@@ -1180,12 +1369,11 @@ err_enable_device:
1180static void __devexit s3_pci_remove(struct pci_dev *dev) 1369static void __devexit s3_pci_remove(struct pci_dev *dev)
1181{ 1370{
1182 struct fb_info *info = pci_get_drvdata(dev); 1371 struct fb_info *info = pci_get_drvdata(dev);
1372 struct s3fb_info __maybe_unused *par = info->par;
1183 1373
1184 if (info) { 1374 if (info) {
1185 1375
1186#ifdef CONFIG_MTRR 1376#ifdef CONFIG_MTRR
1187 struct s3fb_info *par = info->par;
1188
1189 if (par->mtrr_reg >= 0) { 1377 if (par->mtrr_reg >= 0) {
1190 mtrr_del(par->mtrr_reg, 0, 0); 1378 mtrr_del(par->mtrr_reg, 0, 0);
1191 par->mtrr_reg = -1; 1379 par->mtrr_reg = -1;
@@ -1195,6 +1383,13 @@ static void __devexit s3_pci_remove(struct pci_dev *dev)
1195 unregister_framebuffer(info); 1383 unregister_framebuffer(info);
1196 fb_dealloc_cmap(&info->cmap); 1384 fb_dealloc_cmap(&info->cmap);
1197 1385
1386#ifdef CONFIG_FB_S3_DDC
1387 if (par->ddc_registered)
1388 i2c_del_adapter(&par->ddc_adapter);
1389 if (par->mmio)
1390 iounmap(par->mmio);
1391#endif
1392
1198 pci_iounmap(dev, info->screen_base); 1393 pci_iounmap(dev, info->screen_base);
1199 pci_release_regions(dev); 1394 pci_release_regions(dev);
1200/* pci_disable_device(dev); */ 1395/* pci_disable_device(dev); */
diff --git a/drivers/video/savage/savagefb-i2c.c b/drivers/video/savage/savagefb-i2c.c
index bb71fea07284..80fa87e2ae2f 100644
--- a/drivers/video/savage/savagefb-i2c.c
+++ b/drivers/video/savage/savagefb-i2c.c
@@ -171,6 +171,8 @@ void savagefb_create_i2c_busses(struct fb_info *info)
171 171
172 switch (par->chip) { 172 switch (par->chip) {
173 case S3_PROSAVAGE: 173 case S3_PROSAVAGE:
174 case S3_PROSAVAGEDDR:
175 case S3_TWISTER:
174 par->chan.reg = CR_SERIAL2; 176 par->chan.reg = CR_SERIAL2;
175 par->chan.ioaddr = par->mmio.vbase; 177 par->chan.ioaddr = par->mmio.vbase;
176 par->chan.algo.setsda = prosavage_gpio_setsda; 178 par->chan.algo.setsda = prosavage_gpio_setsda;
diff --git a/drivers/video/savage/savagefb.h b/drivers/video/savage/savagefb.h
index 4e9490c19d7d..32549d177b19 100644
--- a/drivers/video/savage/savagefb.h
+++ b/drivers/video/savage/savagefb.h
@@ -36,7 +36,6 @@
36#define PCI_CHIP_SAVAGE_IX 0x8c13 36#define PCI_CHIP_SAVAGE_IX 0x8c13
37#define PCI_CHIP_PROSAVAGE_PM 0x8a25 37#define PCI_CHIP_PROSAVAGE_PM 0x8a25
38#define PCI_CHIP_PROSAVAGE_KM 0x8a26 38#define PCI_CHIP_PROSAVAGE_KM 0x8a26
39 /* Twister is a code name; hope I get the real name soon. */
40#define PCI_CHIP_S3TWISTER_P 0x8d01 39#define PCI_CHIP_S3TWISTER_P 0x8d01
41#define PCI_CHIP_S3TWISTER_K 0x8d02 40#define PCI_CHIP_S3TWISTER_K 0x8d02
42#define PCI_CHIP_PROSAVAGE_DDR 0x8d03 41#define PCI_CHIP_PROSAVAGE_DDR 0x8d03
@@ -52,14 +51,15 @@
52#define PCI_CHIP_SUPSAV_IXCDDR 0x8c2f 51#define PCI_CHIP_SUPSAV_IXCDDR 0x8c2f
53 52
54 53
54#define S3_SAVAGE_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000))
55 55
56#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX)) 56#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
57 57
58#define S3_SAVAGE4_SERIES(chip) ((chip==S3_SAVAGE4) || (chip==S3_PROSAVAGE)) 58#define S3_SAVAGE4_SERIES(chip) ((chip>=S3_SAVAGE4) || (chip<=S3_PROSAVAGEDDR))
59 59
60#define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE)) 60#define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE))
61 61
62#define S3_SAVAGE_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000)) 62#define S3_MOBILE_TWISTER_SERIES(chip) ((chip==S3_TWISTER) || (chip==S3_PROSAVAGEDDR))
63 63
64/* Chip tags. These are used to group the adapters into 64/* Chip tags. These are used to group the adapters into
65 * related families. 65 * related families.
@@ -71,6 +71,8 @@ typedef enum {
71 S3_SAVAGE_MX, 71 S3_SAVAGE_MX,
72 S3_SAVAGE4, 72 S3_SAVAGE4,
73 S3_PROSAVAGE, 73 S3_PROSAVAGE,
74 S3_TWISTER,
75 S3_PROSAVAGEDDR,
74 S3_SUPERSAVAGE, 76 S3_SUPERSAVAGE,
75 S3_SAVAGE2000, 77 S3_SAVAGE2000,
76 S3_LAST 78 S3_LAST
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index a2dc1a7ec758..3b7f2f5bae71 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -328,7 +328,9 @@ SavageSetup2DEngine(struct savagefb_par *par)
328 savage_out32(0x48C18, savage_in32(0x48C18, par) | 0x0C, par); 328 savage_out32(0x48C18, savage_in32(0x48C18, par) | 0x0C, par);
329 break; 329 break;
330 case S3_SAVAGE4: 330 case S3_SAVAGE4:
331 case S3_TWISTER:
331 case S3_PROSAVAGE: 332 case S3_PROSAVAGE:
333 case S3_PROSAVAGEDDR:
332 case S3_SUPERSAVAGE: 334 case S3_SUPERSAVAGE:
333 /* Disable BCI */ 335 /* Disable BCI */
334 savage_out32(0x48C18, savage_in32(0x48C18, par) & 0x3FF0, par); 336 savage_out32(0x48C18, savage_in32(0x48C18, par) & 0x3FF0, par);
@@ -1886,6 +1888,8 @@ static int savage_init_hw(struct savagefb_par *par)
1886 break; 1888 break;
1887 1889
1888 case S3_PROSAVAGE: 1890 case S3_PROSAVAGE:
1891 case S3_PROSAVAGEDDR:
1892 case S3_TWISTER:
1889 videoRam = RamSavageNB[(config1 & 0xE0) >> 5] * 1024; 1893 videoRam = RamSavageNB[(config1 & 0xE0) >> 5] * 1024;
1890 break; 1894 break;
1891 1895
@@ -1963,7 +1967,8 @@ static int savage_init_hw(struct savagefb_par *par)
1963 } 1967 }
1964 } 1968 }
1965 1969
1966 if (S3_SAVAGE_MOBILE_SERIES(par->chip) && !par->crtonly) 1970 if ((S3_SAVAGE_MOBILE_SERIES(par->chip) ||
1971 S3_MOBILE_TWISTER_SERIES(par->chip)) && !par->crtonly)
1967 par->display_type = DISP_LCD; 1972 par->display_type = DISP_LCD;
1968 else if (dvi || (par->chip == S3_SAVAGE4 && par->dvi)) 1973 else if (dvi || (par->chip == S3_SAVAGE4 && par->dvi))
1969 par->display_type = DISP_DFP; 1974 par->display_type = DISP_DFP;
@@ -2111,19 +2116,19 @@ static int __devinit savage_init_fb_info(struct fb_info *info,
2111 snprintf(info->fix.id, 16, "ProSavageKM"); 2116 snprintf(info->fix.id, 16, "ProSavageKM");
2112 break; 2117 break;
2113 case FB_ACCEL_S3TWISTER_P: 2118 case FB_ACCEL_S3TWISTER_P:
2114 par->chip = S3_PROSAVAGE; 2119 par->chip = S3_TWISTER;
2115 snprintf(info->fix.id, 16, "TwisterP"); 2120 snprintf(info->fix.id, 16, "TwisterP");
2116 break; 2121 break;
2117 case FB_ACCEL_S3TWISTER_K: 2122 case FB_ACCEL_S3TWISTER_K:
2118 par->chip = S3_PROSAVAGE; 2123 par->chip = S3_TWISTER;
2119 snprintf(info->fix.id, 16, "TwisterK"); 2124 snprintf(info->fix.id, 16, "TwisterK");
2120 break; 2125 break;
2121 case FB_ACCEL_PROSAVAGE_DDR: 2126 case FB_ACCEL_PROSAVAGE_DDR:
2122 par->chip = S3_PROSAVAGE; 2127 par->chip = S3_PROSAVAGEDDR;
2123 snprintf(info->fix.id, 16, "ProSavageDDR"); 2128 snprintf(info->fix.id, 16, "ProSavageDDR");
2124 break; 2129 break;
2125 case FB_ACCEL_PROSAVAGE_DDRK: 2130 case FB_ACCEL_PROSAVAGE_DDRK:
2126 par->chip = S3_PROSAVAGE; 2131 par->chip = S3_PROSAVAGEDDR;
2127 snprintf(info->fix.id, 16, "ProSavage8"); 2132 snprintf(info->fix.id, 16, "ProSavage8");
2128 break; 2133 break;
2129 } 2134 }
diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c
index 8fe19582c460..45e47d847163 100644
--- a/drivers/video/sh7760fb.c
+++ b/drivers/video/sh7760fb.c
@@ -551,8 +551,7 @@ out_unmap:
551 free_irq(par->irq, &par->vsync); 551 free_irq(par->irq, &par->vsync);
552 iounmap(par->base); 552 iounmap(par->base);
553out_res: 553out_res:
554 release_resource(par->ioarea); 554 release_mem_region(res->start, resource_size(res));
555 kfree(par->ioarea);
556out_fb: 555out_fb:
557 framebuffer_release(info); 556 framebuffer_release(info);
558 return ret; 557 return ret;
@@ -570,8 +569,7 @@ static int __devexit sh7760fb_remove(struct platform_device *dev)
570 if (par->irq >= 0) 569 if (par->irq >= 0)
571 free_irq(par->irq, par); 570 free_irq(par->irq, par);
572 iounmap(par->base); 571 iounmap(par->base);
573 release_resource(par->ioarea); 572 release_mem_region(par->ioarea->start, resource_size(par->ioarea));
574 kfree(par->ioarea);
575 framebuffer_release(info); 573 framebuffer_release(info);
576 platform_set_drvdata(dev, NULL); 574 platform_set_drvdata(dev, NULL);
577 575
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index 2b9e56a6bde4..6ae40b630dc9 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -1131,15 +1131,19 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
1131 pm_runtime_get_sync(hdmi->dev); 1131 pm_runtime_get_sync(hdmi->dev);
1132 1132
1133 ret = sh_hdmi_read_edid(hdmi, &hdmi_rate, &parent_rate); 1133 ret = sh_hdmi_read_edid(hdmi, &hdmi_rate, &parent_rate);
1134 if (ret < 0) 1134 if (ret < 0) {
1135 pm_runtime_put(hdmi->dev);
1135 goto out; 1136 goto out;
1137 }
1136 1138
1137 hdmi->hp_state = HDMI_HOTPLUG_EDID_DONE; 1139 hdmi->hp_state = HDMI_HOTPLUG_EDID_DONE;
1138 1140
1139 /* Reconfigure the clock */ 1141 /* Reconfigure the clock */
1140 ret = sh_hdmi_clk_configure(hdmi, hdmi_rate, parent_rate); 1142 ret = sh_hdmi_clk_configure(hdmi, hdmi_rate, parent_rate);
1141 if (ret < 0) 1143 if (ret < 0) {
1144 pm_runtime_put(hdmi->dev);
1142 goto out; 1145 goto out;
1146 }
1143 1147
1144 msleep(10); 1148 msleep(10);
1145 sh_hdmi_configure(hdmi); 1149 sh_hdmi_configure(hdmi);
@@ -1336,6 +1340,7 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
1336ecodec: 1340ecodec:
1337 free_irq(irq, hdmi); 1341 free_irq(irq, hdmi);
1338ereqirq: 1342ereqirq:
1343 pm_runtime_suspend(&pdev->dev);
1339 pm_runtime_disable(&pdev->dev); 1344 pm_runtime_disable(&pdev->dev);
1340 iounmap(hdmi->base); 1345 iounmap(hdmi->base);
1341emap: 1346emap:
@@ -1372,6 +1377,7 @@ static int __exit sh_hdmi_remove(struct platform_device *pdev)
1372 free_irq(irq, hdmi); 1377 free_irq(irq, hdmi);
1373 /* Wait for already scheduled work */ 1378 /* Wait for already scheduled work */
1374 cancel_delayed_work_sync(&hdmi->edid_work); 1379 cancel_delayed_work_sync(&hdmi->edid_work);
1380 pm_runtime_suspend(&pdev->dev);
1375 pm_runtime_disable(&pdev->dev); 1381 pm_runtime_disable(&pdev->dev);
1376 clk_disable(hdmi->hdmi_clk); 1382 clk_disable(hdmi->hdmi_clk);
1377 clk_put(hdmi->hdmi_clk); 1383 clk_put(hdmi->hdmi_clk);
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 9bcc61b4ef14..404c03b4b7c7 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -27,6 +27,7 @@
27#include <asm/atomic.h> 27#include <asm/atomic.h>
28 28
29#include "sh_mobile_lcdcfb.h" 29#include "sh_mobile_lcdcfb.h"
30#include "sh_mobile_meram.h"
30 31
31#define SIDE_B_OFFSET 0x1000 32#define SIDE_B_OFFSET 0x1000
32#define MIRROR_OFFSET 0x2000 33#define MIRROR_OFFSET 0x2000
@@ -143,6 +144,7 @@ struct sh_mobile_lcdc_priv {
143 unsigned long saved_shared_regs[NR_SHARED_REGS]; 144 unsigned long saved_shared_regs[NR_SHARED_REGS];
144 int started; 145 int started;
145 int forced_bpp; /* 2 channel LCDC must share bpp setting */ 146 int forced_bpp; /* 2 channel LCDC must share bpp setting */
147 struct sh_mobile_meram_info *meram_dev;
146}; 148};
147 149
148static bool banked(int reg_nr) 150static bool banked(int reg_nr)
@@ -469,7 +471,6 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
469 int bpp = 0; 471 int bpp = 0;
470 unsigned long ldddsr; 472 unsigned long ldddsr;
471 int k, m; 473 int k, m;
472 int ret = 0;
473 474
474 /* enable clocks before accessing the hardware */ 475 /* enable clocks before accessing the hardware */
475 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { 476 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
@@ -538,11 +539,12 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
538 lcdc_write_chan(ch, LDPMR, 0); 539 lcdc_write_chan(ch, LDPMR, 0);
539 540
540 board_cfg = &ch->cfg.board_cfg; 541 board_cfg = &ch->cfg.board_cfg;
541 if (board_cfg->setup_sys) 542 if (board_cfg->setup_sys) {
542 ret = board_cfg->setup_sys(board_cfg->board_data, ch, 543 int ret = board_cfg->setup_sys(board_cfg->board_data,
543 &sh_mobile_lcdc_sys_bus_ops); 544 ch, &sh_mobile_lcdc_sys_bus_ops);
544 if (ret) 545 if (ret)
545 return ret; 546 return ret;
547 }
546 } 548 }
547 549
548 /* word and long word swap */ 550 /* word and long word swap */
@@ -564,6 +566,9 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
564 } 566 }
565 567
566 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { 568 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
569 unsigned long base_addr_y;
570 unsigned long base_addr_c = 0;
571 int pitch;
567 ch = &priv->ch[k]; 572 ch = &priv->ch[k];
568 573
569 if (!priv->ch[k].enabled) 574 if (!priv->ch[k].enabled)
@@ -598,16 +603,68 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
598 } 603 }
599 lcdc_write_chan(ch, LDDFR, tmp); 604 lcdc_write_chan(ch, LDDFR, tmp);
600 605
606 base_addr_y = ch->info->fix.smem_start;
607 base_addr_c = base_addr_y +
608 ch->info->var.xres *
609 ch->info->var.yres_virtual;
610 pitch = ch->info->fix.line_length;
611
612 /* test if we can enable meram */
613 if (ch->cfg.meram_cfg && priv->meram_dev &&
614 priv->meram_dev->ops) {
615 struct sh_mobile_meram_cfg *cfg;
616 struct sh_mobile_meram_info *mdev;
617 unsigned long icb_addr_y, icb_addr_c;
618 int icb_pitch;
619 int pf;
620
621 cfg = ch->cfg.meram_cfg;
622 mdev = priv->meram_dev;
623 /* we need to de-init configured ICBs before we
624 * we can re-initialize them.
625 */
626 if (ch->meram_enabled)
627 mdev->ops->meram_unregister(mdev, cfg);
628
629 ch->meram_enabled = 0;
630
631 if (ch->info->var.nonstd) {
632 if (ch->info->var.bits_per_pixel == 24)
633 pf = SH_MOBILE_MERAM_PF_NV24;
634 else
635 pf = SH_MOBILE_MERAM_PF_NV;
636 } else {
637 pf = SH_MOBILE_MERAM_PF_RGB;
638 }
639
640 ret = mdev->ops->meram_register(mdev, cfg, pitch,
641 ch->info->var.yres,
642 pf,
643 base_addr_y,
644 base_addr_c,
645 &icb_addr_y,
646 &icb_addr_c,
647 &icb_pitch);
648 if (!ret) {
649 /* set LDSA1R value */
650 base_addr_y = icb_addr_y;
651 pitch = icb_pitch;
652
653 /* set LDSA2R value if required */
654 if (base_addr_c)
655 base_addr_c = icb_addr_c;
656
657 ch->meram_enabled = 1;
658 }
659 }
660
601 /* point out our frame buffer */ 661 /* point out our frame buffer */
602 lcdc_write_chan(ch, LDSA1R, ch->info->fix.smem_start); 662 lcdc_write_chan(ch, LDSA1R, base_addr_y);
603 if (ch->info->var.nonstd) 663 if (ch->info->var.nonstd)
604 lcdc_write_chan(ch, LDSA2R, 664 lcdc_write_chan(ch, LDSA2R, base_addr_c);
605 ch->info->fix.smem_start +
606 ch->info->var.xres *
607 ch->info->var.yres_virtual);
608 665
609 /* set line size */ 666 /* set line size */
610 lcdc_write_chan(ch, LDMLSR, ch->info->fix.line_length); 667 lcdc_write_chan(ch, LDMLSR, pitch);
611 668
612 /* setup deferred io if SYS bus */ 669 /* setup deferred io if SYS bus */
613 tmp = ch->cfg.sys_bus_cfg.deferred_io_msec; 670 tmp = ch->cfg.sys_bus_cfg.deferred_io_msec;
@@ -692,6 +749,17 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
692 board_cfg->display_off(board_cfg->board_data); 749 board_cfg->display_off(board_cfg->board_data);
693 module_put(board_cfg->owner); 750 module_put(board_cfg->owner);
694 } 751 }
752
753 /* disable the meram */
754 if (ch->meram_enabled) {
755 struct sh_mobile_meram_cfg *cfg;
756 struct sh_mobile_meram_info *mdev;
757 cfg = ch->cfg.meram_cfg;
758 mdev = priv->meram_dev;
759 mdev->ops->meram_unregister(mdev, cfg);
760 ch->meram_enabled = 0;
761 }
762
695 } 763 }
696 764
697 /* stop the lcdc */ 765 /* stop the lcdc */
@@ -875,9 +943,29 @@ static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var,
875 } else 943 } else
876 base_addr_c = 0; 944 base_addr_c = 0;
877 945
878 lcdc_write_chan_mirror(ch, LDSA1R, base_addr_y); 946 if (!ch->meram_enabled) {
879 if (base_addr_c) 947 lcdc_write_chan_mirror(ch, LDSA1R, base_addr_y);
880 lcdc_write_chan_mirror(ch, LDSA2R, base_addr_c); 948 if (base_addr_c)
949 lcdc_write_chan_mirror(ch, LDSA2R, base_addr_c);
950 } else {
951 struct sh_mobile_meram_cfg *cfg;
952 struct sh_mobile_meram_info *mdev;
953 unsigned long icb_addr_y, icb_addr_c;
954 int ret;
955
956 cfg = ch->cfg.meram_cfg;
957 mdev = priv->meram_dev;
958 ret = mdev->ops->meram_update(mdev, cfg,
959 base_addr_y, base_addr_c,
960 &icb_addr_y, &icb_addr_c);
961 if (ret)
962 return ret;
963
964 lcdc_write_chan_mirror(ch, LDSA1R, icb_addr_y);
965 if (icb_addr_c)
966 lcdc_write_chan_mirror(ch, LDSA2R, icb_addr_c);
967
968 }
881 969
882 if (lcdc_chan_is_sublcd(ch)) 970 if (lcdc_chan_is_sublcd(ch))
883 lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_SRS); 971 lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_SRS);
@@ -1288,7 +1376,6 @@ static int sh_mobile_lcdc_notify(struct notifier_block *nb,
1288 struct fb_info *info = event->info; 1376 struct fb_info *info = event->info;
1289 struct sh_mobile_lcdc_chan *ch = info->par; 1377 struct sh_mobile_lcdc_chan *ch = info->par;
1290 struct sh_mobile_lcdc_board_cfg *board_cfg = &ch->cfg.board_cfg; 1378 struct sh_mobile_lcdc_board_cfg *board_cfg = &ch->cfg.board_cfg;
1291 int ret;
1292 1379
1293 if (&ch->lcdc->notifier != nb) 1380 if (&ch->lcdc->notifier != nb)
1294 return NOTIFY_DONE; 1381 return NOTIFY_DONE;
@@ -1302,7 +1389,6 @@ static int sh_mobile_lcdc_notify(struct notifier_block *nb,
1302 board_cfg->display_off(board_cfg->board_data); 1389 board_cfg->display_off(board_cfg->board_data);
1303 module_put(board_cfg->owner); 1390 module_put(board_cfg->owner);
1304 } 1391 }
1305 pm_runtime_put(info->device);
1306 sh_mobile_lcdc_stop(ch->lcdc); 1392 sh_mobile_lcdc_stop(ch->lcdc);
1307 break; 1393 break;
1308 case FB_EVENT_RESUME: 1394 case FB_EVENT_RESUME:
@@ -1316,9 +1402,7 @@ static int sh_mobile_lcdc_notify(struct notifier_block *nb,
1316 module_put(board_cfg->owner); 1402 module_put(board_cfg->owner);
1317 } 1403 }
1318 1404
1319 ret = sh_mobile_lcdc_start(ch->lcdc); 1405 sh_mobile_lcdc_start(ch->lcdc);
1320 if (!ret)
1321 pm_runtime_get_sync(info->device);
1322 } 1406 }
1323 1407
1324 return NOTIFY_OK; 1408 return NOTIFY_OK;
@@ -1420,6 +1504,8 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
1420 goto err1; 1504 goto err1;
1421 } 1505 }
1422 1506
1507 priv->meram_dev = pdata->meram_dev;
1508
1423 for (i = 0; i < j; i++) { 1509 for (i = 0; i < j; i++) {
1424 struct fb_var_screeninfo *var; 1510 struct fb_var_screeninfo *var;
1425 const struct fb_videomode *lcd_cfg, *max_cfg = NULL; 1511 const struct fb_videomode *lcd_cfg, *max_cfg = NULL;
diff --git a/drivers/video/sh_mobile_lcdcfb.h b/drivers/video/sh_mobile_lcdcfb.h
index f16cb5645a13..aeed6687e6a7 100644
--- a/drivers/video/sh_mobile_lcdcfb.h
+++ b/drivers/video/sh_mobile_lcdcfb.h
@@ -39,6 +39,7 @@ struct sh_mobile_lcdc_chan {
39 int use_count; 39 int use_count;
40 int blank_status; 40 int blank_status;
41 struct mutex open_lock; /* protects the use counter */ 41 struct mutex open_lock; /* protects the use counter */
42 int meram_enabled;
42}; 43};
43 44
44#endif 45#endif
diff --git a/drivers/video/sh_mobile_meram.c b/drivers/video/sh_mobile_meram.c
new file mode 100644
index 000000000000..9170c82b495c
--- /dev/null
+++ b/drivers/video/sh_mobile_meram.c
@@ -0,0 +1,567 @@
1/*
2 * SuperH Mobile MERAM Driver for SuperH Mobile LCDC Driver
3 *
4 * Copyright (c) 2011 Damian Hobson-Garcia <dhobsong@igel.co.jp>
5 * Takanari Hayama <taki@igel.co.jp>
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/device.h>
15#include <linux/io.h>
16#include <linux/slab.h>
17#include <linux/platform_device.h>
18
19#include "sh_mobile_meram.h"
20
21/* meram registers */
22#define MExxCTL 0x0
23#define MExxBSIZE 0x4
24#define MExxMNCF 0x8
25#define MExxSARA 0x10
26#define MExxSARB 0x14
27#define MExxSBSIZE 0x18
28
29#define MERAM_MExxCTL_VAL(ctl, next_icb, addr) \
30 ((ctl) | (((next_icb) & 0x1f) << 11) | (((addr) & 0x7ff) << 16))
31#define MERAM_MExxBSIZE_VAL(a, b, c) \
32 (((a) << 28) | ((b) << 16) | (c))
33
34#define MEVCR1 0x4
35#define MEACTS 0x10
36#define MEQSEL1 0x40
37#define MEQSEL2 0x44
38
39/* settings */
40#define MERAM_SEC_LINE 15
41#define MERAM_LINE_WIDTH 2048
42
43/*
44 * MERAM/ICB access functions
45 */
46
47#define MERAM_ICB_OFFSET(base, idx, off) \
48 ((base) + (0x400 + ((idx) * 0x20) + (off)))
49
50static inline void meram_write_icb(void __iomem *base, int idx, int off,
51 unsigned long val)
52{
53 iowrite32(val, MERAM_ICB_OFFSET(base, idx, off));
54}
55
56static inline unsigned long meram_read_icb(void __iomem *base, int idx, int off)
57{
58 return ioread32(MERAM_ICB_OFFSET(base, idx, off));
59}
60
61static inline void meram_write_reg(void __iomem *base, int off,
62 unsigned long val)
63{
64 iowrite32(val, base + off);
65}
66
67static inline unsigned long meram_read_reg(void __iomem *base, int off)
68{
69 return ioread32(base + off);
70}
71
72/*
73 * register ICB
74 */
75
76#define MERAM_CACHE_START(p) ((p) >> 16)
77#define MERAM_CACHE_END(p) ((p) & 0xffff)
78#define MERAM_CACHE_SET(o, s) ((((o) & 0xffff) << 16) | \
79 (((o) + (s) - 1) & 0xffff))
80
81/*
82 * check if there's no overlaps in MERAM allocation.
83 */
84
85static inline int meram_check_overlap(struct sh_mobile_meram_priv *priv,
86 struct sh_mobile_meram_icb *new)
87{
88 int i;
89 int used_start, used_end, meram_start, meram_end;
90
91 /* valid ICB? */
92 if (new->marker_icb & ~0x1f || new->cache_icb & ~0x1f)
93 return 1;
94
95 if (test_bit(new->marker_icb, &priv->used_icb) ||
96 test_bit(new->cache_icb, &priv->used_icb))
97 return 1;
98
99 for (i = 0; i < priv->used_meram_cache_regions; i++) {
100 used_start = MERAM_CACHE_START(priv->used_meram_cache[i]);
101 used_end = MERAM_CACHE_END(priv->used_meram_cache[i]);
102 meram_start = new->meram_offset;
103 meram_end = new->meram_offset + new->meram_size;
104
105 if ((meram_start >= used_start && meram_start < used_end) ||
106 (meram_end > used_start && meram_end < used_end))
107 return 1;
108 }
109
110 return 0;
111}
112
113/*
114 * mark the specified ICB as used
115 */
116
117static inline void meram_mark(struct sh_mobile_meram_priv *priv,
118 struct sh_mobile_meram_icb *new)
119{
120 int n;
121
122 if (new->marker_icb < 0 || new->cache_icb < 0)
123 return;
124
125 __set_bit(new->marker_icb, &priv->used_icb);
126 __set_bit(new->cache_icb, &priv->used_icb);
127
128 n = priv->used_meram_cache_regions;
129
130 priv->used_meram_cache[n] = MERAM_CACHE_SET(new->meram_offset,
131 new->meram_size);
132
133 priv->used_meram_cache_regions++;
134}
135
136/*
137 * unmark the specified ICB as used
138 */
139
140static inline void meram_unmark(struct sh_mobile_meram_priv *priv,
141 struct sh_mobile_meram_icb *icb)
142{
143 int i;
144 unsigned long pattern;
145
146 if (icb->marker_icb < 0 || icb->cache_icb < 0)
147 return;
148
149 __clear_bit(icb->marker_icb, &priv->used_icb);
150 __clear_bit(icb->cache_icb, &priv->used_icb);
151
152 pattern = MERAM_CACHE_SET(icb->meram_offset, icb->meram_size);
153 for (i = 0; i < priv->used_meram_cache_regions; i++) {
154 if (priv->used_meram_cache[i] == pattern) {
155 while (i < priv->used_meram_cache_regions - 1) {
156 priv->used_meram_cache[i] =
157 priv->used_meram_cache[i + 1] ;
158 i++;
159 }
160 priv->used_meram_cache[i] = 0;
161 priv->used_meram_cache_regions--;
162 break;
163 }
164 }
165}
166
167/*
168 * is this a YCbCr(NV12, NV16 or NV24) colorspace
169 */
170static inline int is_nvcolor(int cspace)
171{
172 if (cspace == SH_MOBILE_MERAM_PF_NV ||
173 cspace == SH_MOBILE_MERAM_PF_NV24)
174 return 1;
175 return 0;
176}
177
178/*
179 * set the next address to fetch
180 */
181static inline void meram_set_next_addr(struct sh_mobile_meram_priv *priv,
182 struct sh_mobile_meram_cfg *cfg,
183 unsigned long base_addr_y,
184 unsigned long base_addr_c)
185{
186 unsigned long target;
187
188 target = (cfg->current_reg) ? MExxSARA : MExxSARB;
189 cfg->current_reg ^= 1;
190
191 /* set the next address to fetch */
192 meram_write_icb(priv->base, cfg->icb[0].cache_icb, target,
193 base_addr_y);
194 meram_write_icb(priv->base, cfg->icb[0].marker_icb, target,
195 base_addr_y + cfg->icb[0].cache_unit);
196
197 if (is_nvcolor(cfg->pixelformat)) {
198 meram_write_icb(priv->base, cfg->icb[1].cache_icb, target,
199 base_addr_c);
200 meram_write_icb(priv->base, cfg->icb[1].marker_icb, target,
201 base_addr_c + cfg->icb[1].cache_unit);
202 }
203}
204
205/*
206 * get the next ICB address
207 */
208static inline void meram_get_next_icb_addr(struct sh_mobile_meram_info *pdata,
209 struct sh_mobile_meram_cfg *cfg,
210 unsigned long *icb_addr_y,
211 unsigned long *icb_addr_c)
212{
213 unsigned long icb_offset;
214
215 if (pdata->addr_mode == SH_MOBILE_MERAM_MODE0)
216 icb_offset = 0x80000000 | (cfg->current_reg << 29);
217 else
218 icb_offset = 0xc0000000 | (cfg->current_reg << 23);
219
220 *icb_addr_y = icb_offset | (cfg->icb[0].marker_icb << 24);
221 if ((*icb_addr_c) && is_nvcolor(cfg->pixelformat))
222 *icb_addr_c = icb_offset | (cfg->icb[1].marker_icb << 24);
223}
224
225#define MERAM_CALC_BYTECOUNT(x, y) \
226 (((x) * (y) + (MERAM_LINE_WIDTH - 1)) & ~(MERAM_LINE_WIDTH - 1))
227
228/*
229 * initialize MERAM
230 */
231
232static int meram_init(struct sh_mobile_meram_priv *priv,
233 struct sh_mobile_meram_icb *icb,
234 int xres, int yres, int *out_pitch)
235{
236 unsigned long total_byte_count = MERAM_CALC_BYTECOUNT(xres, yres);
237 unsigned long bnm;
238 int lcdc_pitch, xpitch, line_cnt;
239 int save_lines;
240
241 /* adjust pitch to 1024, 2048, 4096 or 8192 */
242 lcdc_pitch = (xres - 1) | 1023;
243 lcdc_pitch = lcdc_pitch | (lcdc_pitch >> 1);
244 lcdc_pitch = lcdc_pitch | (lcdc_pitch >> 2);
245 lcdc_pitch += 1;
246
247 /* derive settings */
248 if (lcdc_pitch == 8192 && yres >= 1024) {
249 lcdc_pitch = xpitch = MERAM_LINE_WIDTH;
250 line_cnt = total_byte_count >> 11;
251 *out_pitch = xres;
252 save_lines = (icb->meram_size / 16 / MERAM_SEC_LINE);
253 save_lines *= MERAM_SEC_LINE;
254 } else {
255 xpitch = xres;
256 line_cnt = yres;
257 *out_pitch = lcdc_pitch;
258 save_lines = icb->meram_size / (lcdc_pitch >> 10) / 2;
259 save_lines &= 0xff;
260 }
261 bnm = (save_lines - 1) << 16;
262
263 /* TODO: we better to check if we have enough MERAM buffer size */
264
265 /* set up ICB */
266 meram_write_icb(priv->base, icb->cache_icb, MExxBSIZE,
267 MERAM_MExxBSIZE_VAL(0x0, line_cnt - 1, xpitch - 1));
268 meram_write_icb(priv->base, icb->marker_icb, MExxBSIZE,
269 MERAM_MExxBSIZE_VAL(0xf, line_cnt - 1, xpitch - 1));
270
271 meram_write_icb(priv->base, icb->cache_icb, MExxMNCF, bnm);
272 meram_write_icb(priv->base, icb->marker_icb, MExxMNCF, bnm);
273
274 meram_write_icb(priv->base, icb->cache_icb, MExxSBSIZE, xpitch);
275 meram_write_icb(priv->base, icb->marker_icb, MExxSBSIZE, xpitch);
276
277 /* save a cache unit size */
278 icb->cache_unit = xres * save_lines;
279
280 /*
281 * Set MERAM for framebuffer
282 *
283 * 0x70f: WD = 0x3, WS=0x1, CM=0x1, MD=FB mode
284 * we also chain the cache_icb and the marker_icb.
285 * we also split the allocated MERAM buffer between two ICBs.
286 */
287 meram_write_icb(priv->base, icb->cache_icb, MExxCTL,
288 MERAM_MExxCTL_VAL(0x70f, icb->marker_icb,
289 icb->meram_offset));
290 meram_write_icb(priv->base, icb->marker_icb, MExxCTL,
291 MERAM_MExxCTL_VAL(0x70f, icb->cache_icb,
292 icb->meram_offset +
293 icb->meram_size / 2));
294
295 return 0;
296}
297
298static void meram_deinit(struct sh_mobile_meram_priv *priv,
299 struct sh_mobile_meram_icb *icb)
300{
301 /* disable ICB */
302 meram_write_icb(priv->base, icb->cache_icb, MExxCTL, 0);
303 meram_write_icb(priv->base, icb->marker_icb, MExxCTL, 0);
304 icb->cache_unit = 0;
305}
306
307/*
308 * register the ICB
309 */
310
311static int sh_mobile_meram_register(struct sh_mobile_meram_info *pdata,
312 struct sh_mobile_meram_cfg *cfg,
313 int xres, int yres, int pixelformat,
314 unsigned long base_addr_y,
315 unsigned long base_addr_c,
316 unsigned long *icb_addr_y,
317 unsigned long *icb_addr_c,
318 int *pitch)
319{
320 struct platform_device *pdev;
321 struct sh_mobile_meram_priv *priv;
322 int n, out_pitch;
323 int error = 0;
324
325 if (!pdata || !pdata->priv || !pdata->pdev || !cfg)
326 return -EINVAL;
327
328 if (pixelformat != SH_MOBILE_MERAM_PF_NV &&
329 pixelformat != SH_MOBILE_MERAM_PF_NV24 &&
330 pixelformat != SH_MOBILE_MERAM_PF_RGB)
331 return -EINVAL;
332
333 priv = pdata->priv;
334 pdev = pdata->pdev;
335
336 dev_dbg(&pdev->dev, "registering %dx%d (%s) (y=%08lx, c=%08lx)",
337 xres, yres, (!pixelformat) ? "yuv" : "rgb",
338 base_addr_y, base_addr_c);
339
340 mutex_lock(&priv->lock);
341
342 /* we can't handle wider than 8192px */
343 if (xres > 8192) {
344 dev_err(&pdev->dev, "width exceeding the limit (> 8192).");
345 error = -EINVAL;
346 goto err;
347 }
348
349 if (priv->used_meram_cache_regions + 2 > SH_MOBILE_MERAM_ICB_NUM) {
350 dev_err(&pdev->dev, "no more ICB available.");
351 error = -EINVAL;
352 goto err;
353 }
354
355 /* do we have at least one ICB config? */
356 if (cfg->icb[0].marker_icb < 0 || cfg->icb[0].cache_icb < 0) {
357 dev_err(&pdev->dev, "at least one ICB is required.");
358 error = -EINVAL;
359 goto err;
360 }
361
362 /* make sure that there's no overlaps */
363 if (meram_check_overlap(priv, &cfg->icb[0])) {
364 dev_err(&pdev->dev, "conflicting config detected.");
365 error = -EINVAL;
366 goto err;
367 }
368 n = 1;
369
370 /* do the same if we have the second ICB set */
371 if (cfg->icb[1].marker_icb >= 0 && cfg->icb[1].cache_icb >= 0) {
372 if (meram_check_overlap(priv, &cfg->icb[1])) {
373 dev_err(&pdev->dev, "conflicting config detected.");
374 error = -EINVAL;
375 goto err;
376 }
377 n = 2;
378 }
379
380 if (is_nvcolor(pixelformat) && n != 2) {
381 dev_err(&pdev->dev, "requires two ICB sets for planar Y/C.");
382 error = -EINVAL;
383 goto err;
384 }
385
386 /* we now register the ICB */
387 cfg->pixelformat = pixelformat;
388 meram_mark(priv, &cfg->icb[0]);
389 if (is_nvcolor(pixelformat))
390 meram_mark(priv, &cfg->icb[1]);
391
392 /* initialize MERAM */
393 meram_init(priv, &cfg->icb[0], xres, yres, &out_pitch);
394 *pitch = out_pitch;
395 if (pixelformat == SH_MOBILE_MERAM_PF_NV)
396 meram_init(priv, &cfg->icb[1], xres, (yres + 1) / 2,
397 &out_pitch);
398 else if (pixelformat == SH_MOBILE_MERAM_PF_NV24)
399 meram_init(priv, &cfg->icb[1], 2 * xres, (yres + 1) / 2,
400 &out_pitch);
401
402 cfg->current_reg = 1;
403 meram_set_next_addr(priv, cfg, base_addr_y, base_addr_c);
404 meram_get_next_icb_addr(pdata, cfg, icb_addr_y, icb_addr_c);
405
406 dev_dbg(&pdev->dev, "registered - can access via y=%08lx, c=%08lx",
407 *icb_addr_y, *icb_addr_c);
408
409err:
410 mutex_unlock(&priv->lock);
411 return error;
412}
413
414static int sh_mobile_meram_unregister(struct sh_mobile_meram_info *pdata,
415 struct sh_mobile_meram_cfg *cfg)
416{
417 struct sh_mobile_meram_priv *priv;
418
419 if (!pdata || !pdata->priv || !cfg)
420 return -EINVAL;
421
422 priv = pdata->priv;
423
424 mutex_lock(&priv->lock);
425
426 /* deinit & unmark */
427 if (is_nvcolor(cfg->pixelformat)) {
428 meram_deinit(priv, &cfg->icb[1]);
429 meram_unmark(priv, &cfg->icb[1]);
430 }
431 meram_deinit(priv, &cfg->icb[0]);
432 meram_unmark(priv, &cfg->icb[0]);
433
434 mutex_unlock(&priv->lock);
435
436 return 0;
437}
438
439static int sh_mobile_meram_update(struct sh_mobile_meram_info *pdata,
440 struct sh_mobile_meram_cfg *cfg,
441 unsigned long base_addr_y,
442 unsigned long base_addr_c,
443 unsigned long *icb_addr_y,
444 unsigned long *icb_addr_c)
445{
446 struct sh_mobile_meram_priv *priv;
447
448 if (!pdata || !pdata->priv || !cfg)
449 return -EINVAL;
450
451 priv = pdata->priv;
452
453 mutex_lock(&priv->lock);
454
455 meram_set_next_addr(priv, cfg, base_addr_y, base_addr_c);
456 meram_get_next_icb_addr(pdata, cfg, icb_addr_y, icb_addr_c);
457
458 mutex_unlock(&priv->lock);
459
460 return 0;
461}
462
463static struct sh_mobile_meram_ops sh_mobile_meram_ops = {
464 .module = THIS_MODULE,
465 .meram_register = sh_mobile_meram_register,
466 .meram_unregister = sh_mobile_meram_unregister,
467 .meram_update = sh_mobile_meram_update,
468};
469
470/*
471 * initialize MERAM
472 */
473
474static int sh_mobile_meram_remove(struct platform_device *pdev);
475
476static int __devinit sh_mobile_meram_probe(struct platform_device *pdev)
477{
478 struct sh_mobile_meram_priv *priv;
479 struct sh_mobile_meram_info *pdata = pdev->dev.platform_data;
480 struct resource *res;
481 int error;
482
483 if (!pdata) {
484 dev_err(&pdev->dev, "no platform data defined\n");
485 return -EINVAL;
486 }
487
488 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
489 if (!res) {
490 dev_err(&pdev->dev, "cannot get platform resources\n");
491 return -ENOENT;
492 }
493
494 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
495 if (!priv) {
496 dev_err(&pdev->dev, "cannot allocate device data\n");
497 return -ENOMEM;
498 }
499
500 platform_set_drvdata(pdev, priv);
501
502 /* initialize private data */
503 mutex_init(&priv->lock);
504 priv->base = ioremap_nocache(res->start, resource_size(res));
505 if (!priv->base) {
506 dev_err(&pdev->dev, "ioremap failed\n");
507 error = -EFAULT;
508 goto err;
509 }
510 pdata->ops = &sh_mobile_meram_ops;
511 pdata->priv = priv;
512 pdata->pdev = pdev;
513
514 /* initialize ICB addressing mode */
515 if (pdata->addr_mode == SH_MOBILE_MERAM_MODE1)
516 meram_write_reg(priv->base, MEVCR1, 1 << 29);
517
518 dev_info(&pdev->dev, "sh_mobile_meram initialized.");
519
520 return 0;
521
522err:
523 sh_mobile_meram_remove(pdev);
524
525 return error;
526}
527
528
529static int sh_mobile_meram_remove(struct platform_device *pdev)
530{
531 struct sh_mobile_meram_priv *priv = platform_get_drvdata(pdev);
532
533 if (priv->base)
534 iounmap(priv->base);
535
536 mutex_destroy(&priv->lock);
537
538 kfree(priv);
539
540 return 0;
541}
542
543static struct platform_driver sh_mobile_meram_driver = {
544 .driver = {
545 .name = "sh_mobile_meram",
546 .owner = THIS_MODULE,
547 },
548 .probe = sh_mobile_meram_probe,
549 .remove = sh_mobile_meram_remove,
550};
551
552static int __init sh_mobile_meram_init(void)
553{
554 return platform_driver_register(&sh_mobile_meram_driver);
555}
556
557static void __exit sh_mobile_meram_exit(void)
558{
559 platform_driver_unregister(&sh_mobile_meram_driver);
560}
561
562module_init(sh_mobile_meram_init);
563module_exit(sh_mobile_meram_exit);
564
565MODULE_DESCRIPTION("SuperH Mobile MERAM driver");
566MODULE_AUTHOR("Damian Hobson-Garcia / Takanari Hayama");
567MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/sh_mobile_meram.h b/drivers/video/sh_mobile_meram.h
new file mode 100644
index 000000000000..82c54fbce8bd
--- /dev/null
+++ b/drivers/video/sh_mobile_meram.h
@@ -0,0 +1,41 @@
1#ifndef __sh_mobile_meram_h__
2#define __sh_mobile_meram_h__
3
4#include <linux/mutex.h>
5#include <video/sh_mobile_meram.h>
6
7/*
8 * MERAM private
9 */
10
11#define MERAM_ICB_Y 0x1
12#define MERAM_ICB_C 0x2
13
14/* MERAM cache size */
15#define SH_MOBILE_MERAM_ICB_NUM 32
16
17#define SH_MOBILE_MERAM_CACHE_OFFSET(p) ((p) >> 16)
18#define SH_MOBILE_MERAM_CACHE_SIZE(p) ((p) & 0xffff)
19
20struct sh_mobile_meram_priv {
21 void __iomem *base;
22 struct mutex lock;
23 unsigned long used_icb;
24 int used_meram_cache_regions;
25 unsigned long used_meram_cache[SH_MOBILE_MERAM_ICB_NUM];
26};
27
28int sh_mobile_meram_alloc_icb(const struct sh_mobile_meram_cfg *cfg,
29 int xres,
30 int yres,
31 unsigned int base_addr,
32 int yuv_mode,
33 int *marker_icb,
34 int *out_pitch);
35
36void sh_mobile_meram_free_icb(int marker_icb);
37
38#define SH_MOBILE_MERAM_START(ind, ab) \
39 (0xC0000000 | ((ab & 0x1) << 23) | ((ind & 0x1F) << 24))
40
41#endif /* !__sh_mobile_meram_h__ */
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index 56ef6b3a9851..87f0be1e78b5 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -1625,22 +1625,22 @@ static int sm501fb_start(struct sm501fb_info *info,
1625 return 0; /* everything is setup */ 1625 return 0; /* everything is setup */
1626 1626
1627 err_mem_res: 1627 err_mem_res:
1628 release_resource(info->fbmem_res); 1628 release_mem_region(info->fbmem_res->start,
1629 kfree(info->fbmem_res); 1629 resource_size(info->fbmem_res));
1630 1630
1631 err_regs2d_map: 1631 err_regs2d_map:
1632 iounmap(info->regs2d); 1632 iounmap(info->regs2d);
1633 1633
1634 err_regs2d_res: 1634 err_regs2d_res:
1635 release_resource(info->regs2d_res); 1635 release_mem_region(info->regs2d_res->start,
1636 kfree(info->regs2d_res); 1636 resource_size(info->regs2d_res));
1637 1637
1638 err_regs_map: 1638 err_regs_map:
1639 iounmap(info->regs); 1639 iounmap(info->regs);
1640 1640
1641 err_regs_res: 1641 err_regs_res:
1642 release_resource(info->regs_res); 1642 release_mem_region(info->regs_res->start,
1643 kfree(info->regs_res); 1643 resource_size(info->regs_res));
1644 1644
1645 err_release: 1645 err_release:
1646 return ret; 1646 return ret;
@@ -1652,16 +1652,16 @@ static void sm501fb_stop(struct sm501fb_info *info)
1652 sm501_unit_power(info->dev->parent, SM501_GATE_DISPLAY, 0); 1652 sm501_unit_power(info->dev->parent, SM501_GATE_DISPLAY, 0);
1653 1653
1654 iounmap(info->fbmem); 1654 iounmap(info->fbmem);
1655 release_resource(info->fbmem_res); 1655 release_mem_region(info->fbmem_res->start,
1656 kfree(info->fbmem_res); 1656 resource_size(info->fbmem_res));
1657 1657
1658 iounmap(info->regs2d); 1658 iounmap(info->regs2d);
1659 release_resource(info->regs2d_res); 1659 release_mem_region(info->regs2d_res->start,
1660 kfree(info->regs2d_res); 1660 resource_size(info->regs2d_res));
1661 1661
1662 iounmap(info->regs); 1662 iounmap(info->regs);
1663 release_resource(info->regs_res); 1663 release_mem_region(info->regs_res->start,
1664 kfree(info->regs_res); 1664 resource_size(info->regs_res));
1665} 1665}
1666 1666
1667static int sm501fb_init_fb(struct fb_info *fb, 1667static int sm501fb_init_fb(struct fb_info *fb,
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 695066b5b2e6..52b0f3e8ccac 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -29,6 +29,7 @@
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/prefetch.h> 30#include <linux/prefetch.h>
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/prefetch.h>
32#include <video/udlfb.h> 33#include <video/udlfb.h>
33#include "edid.h" 34#include "edid.h"
34 35
@@ -1587,10 +1588,19 @@ static int dlfb_usb_probe(struct usb_interface *interface,
1587 goto error; 1588 goto error;
1588 } 1589 }
1589 1590
1590 for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) 1591 for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) {
1591 device_create_file(info->dev, &fb_device_attrs[i]); 1592 retval = device_create_file(info->dev, &fb_device_attrs[i]);
1593 if (retval) {
1594 pr_err("device_create_file failed %d\n", retval);
1595 goto err_del_attrs;
1596 }
1597 }
1592 1598
1593 device_create_bin_file(info->dev, &edid_attr); 1599 retval = device_create_bin_file(info->dev, &edid_attr);
1600 if (retval) {
1601 pr_err("device_create_bin_file failed %d\n", retval);
1602 goto err_del_attrs;
1603 }
1594 1604
1595 pr_info("DisplayLink USB device /dev/fb%d attached. %dx%d resolution." 1605 pr_info("DisplayLink USB device /dev/fb%d attached. %dx%d resolution."
1596 " Using %dK framebuffer memory\n", info->node, 1606 " Using %dK framebuffer memory\n", info->node,
@@ -1599,6 +1609,10 @@ static int dlfb_usb_probe(struct usb_interface *interface,
1599 info->fix.smem_len * 2 : info->fix.smem_len) >> 10); 1609 info->fix.smem_len * 2 : info->fix.smem_len) >> 10);
1600 return 0; 1610 return 0;
1601 1611
1612err_del_attrs:
1613 for (i -= 1; i >= 0; i--)
1614 device_remove_file(info->dev, &fb_device_attrs[i]);
1615
1602error: 1616error:
1603 if (dev) { 1617 if (dev) {
1604 1618
diff --git a/fs/9p/Kconfig b/fs/9p/Kconfig
index 814ac4e213a8..0a93dc1cb4ac 100644
--- a/fs/9p/Kconfig
+++ b/fs/9p/Kconfig
@@ -1,6 +1,6 @@
1config 9P_FS 1config 9P_FS
2 tristate "Plan 9 Resource Sharing Support (9P2000) (Experimental)" 2 tristate "Plan 9 Resource Sharing Support (9P2000)"
3 depends on INET && NET_9P && EXPERIMENTAL 3 depends on INET && NET_9P
4 help 4 help
5 If you say Y here, you will get experimental support for 5 If you say Y here, you will get experimental support for
6 Plan 9 resource sharing via the 9P2000 protocol. 6 Plan 9 resource sharing via the 9P2000 protocol.
@@ -10,7 +10,6 @@ config 9P_FS
10 If unsure, say N. 10 If unsure, say N.
11 11
12if 9P_FS 12if 9P_FS
13
14config 9P_FSCACHE 13config 9P_FSCACHE
15 bool "Enable 9P client caching support (EXPERIMENTAL)" 14 bool "Enable 9P client caching support (EXPERIMENTAL)"
16 depends on EXPERIMENTAL 15 depends on EXPERIMENTAL
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 82a7c38ddad0..691c78f58bef 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -259,7 +259,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
259 if (IS_ERR(inode_fid)) { 259 if (IS_ERR(inode_fid)) {
260 err = PTR_ERR(inode_fid); 260 err = PTR_ERR(inode_fid);
261 mutex_unlock(&v9inode->v_mutex); 261 mutex_unlock(&v9inode->v_mutex);
262 goto error; 262 goto err_clunk_old_fid;
263 } 263 }
264 v9inode->writeback_fid = (void *) inode_fid; 264 v9inode->writeback_fid = (void *) inode_fid;
265 } 265 }
@@ -267,8 +267,8 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
267 /* Since we are opening a file, assign the open fid to the file */ 267 /* Since we are opening a file, assign the open fid to the file */
268 filp = lookup_instantiate_filp(nd, dentry, generic_file_open); 268 filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
269 if (IS_ERR(filp)) { 269 if (IS_ERR(filp)) {
270 p9_client_clunk(ofid); 270 err = PTR_ERR(filp);
271 return PTR_ERR(filp); 271 goto err_clunk_old_fid;
272 } 272 }
273 filp->private_data = ofid; 273 filp->private_data = ofid;
274#ifdef CONFIG_9P_FSCACHE 274#ifdef CONFIG_9P_FSCACHE
@@ -278,10 +278,11 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
278 return 0; 278 return 0;
279 279
280error: 280error:
281 if (ofid)
282 p9_client_clunk(ofid);
283 if (fid) 281 if (fid)
284 p9_client_clunk(fid); 282 p9_client_clunk(fid);
283err_clunk_old_fid:
284 if (ofid)
285 p9_client_clunk(ofid);
285 return err; 286 return err;
286} 287}
287 288
diff --git a/fs/Kconfig b/fs/Kconfig
index f3aa9b08b228..979992dcb386 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -121,9 +121,25 @@ config TMPFS
121 121
122 See <file:Documentation/filesystems/tmpfs.txt> for details. 122 See <file:Documentation/filesystems/tmpfs.txt> for details.
123 123
124config TMPFS_XATTR
125 bool "Tmpfs extended attributes"
126 depends on TMPFS
127 default n
128 help
129 Extended attributes are name:value pairs associated with inodes by
130 the kernel or by users (see the attr(5) manual page, or visit
131 <http://acl.bestbits.at/> for details).
132
133 Currently this enables support for the trusted.* and
134 security.* namespaces.
135
136 If unsure, say N.
137
138 You need this for POSIX ACL support on tmpfs.
139
124config TMPFS_POSIX_ACL 140config TMPFS_POSIX_ACL
125 bool "Tmpfs POSIX Access Control Lists" 141 bool "Tmpfs POSIX Access Control Lists"
126 depends on TMPFS 142 depends on TMPFS_XATTR
127 select GENERIC_ACL 143 select GENERIC_ACL
128 help 144 help
129 POSIX Access Control Lists (ACLs) support permissions for users and 145 POSIX Access Control Lists (ACLs) support permissions for users and
diff --git a/fs/block_dev.c b/fs/block_dev.c
index bf9c7a720371..1f2b19978333 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1238,6 +1238,8 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
1238 res = __blkdev_get(bdev, mode, 0); 1238 res = __blkdev_get(bdev, mode, 0);
1239 1239
1240 if (whole) { 1240 if (whole) {
1241 struct gendisk *disk = whole->bd_disk;
1242
1241 /* finish claiming */ 1243 /* finish claiming */
1242 mutex_lock(&bdev->bd_mutex); 1244 mutex_lock(&bdev->bd_mutex);
1243 spin_lock(&bdev_lock); 1245 spin_lock(&bdev_lock);
@@ -1264,15 +1266,16 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
1264 spin_unlock(&bdev_lock); 1266 spin_unlock(&bdev_lock);
1265 1267
1266 /* 1268 /*
1267 * Block event polling for write claims. Any write 1269 * Block event polling for write claims if requested. Any
1268 * holder makes the write_holder state stick until all 1270 * write holder makes the write_holder state stick until
1269 * are released. This is good enough and tracking 1271 * all are released. This is good enough and tracking
1270 * individual writeable reference is too fragile given 1272 * individual writeable reference is too fragile given the
1271 * the way @mode is used in blkdev_get/put(). 1273 * way @mode is used in blkdev_get/put().
1272 */ 1274 */
1273 if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) { 1275 if ((disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE) &&
1276 !res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) {
1274 bdev->bd_write_holder = true; 1277 bdev->bd_write_holder = true;
1275 disk_block_events(bdev->bd_disk); 1278 disk_block_events(disk);
1276 } 1279 }
1277 1280
1278 mutex_unlock(&bdev->bd_mutex); 1281 mutex_unlock(&bdev->bd_mutex);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 38b8ab554924..33da49dc3cc6 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -848,7 +848,8 @@ get_more_pages:
848 op->payload_len = cpu_to_le32(len); 848 op->payload_len = cpu_to_le32(len);
849 req->r_request->hdr.data_len = cpu_to_le32(len); 849 req->r_request->hdr.data_len = cpu_to_le32(len);
850 850
851 ceph_osdc_start_request(&fsc->client->osdc, req, true); 851 rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
852 BUG_ON(rc);
852 req = NULL; 853 req = NULL;
853 854
854 /* continue? */ 855 /* continue? */
@@ -880,8 +881,6 @@ release_pvec_pages:
880out: 881out:
881 if (req) 882 if (req)
882 ceph_osdc_put_request(req); 883 ceph_osdc_put_request(req);
883 if (rc > 0)
884 rc = 0; /* vfs expects us to return 0 */
885 ceph_put_snap_context(snapc); 884 ceph_put_snap_context(snapc);
886 dout("writepages done, rc = %d\n", rc); 885 dout("writepages done, rc = %d\n", rc);
887 return rc; 886 return rc;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 2a5404c1c42f..1f72b00447c4 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -569,7 +569,8 @@ retry:
569 list_add_tail(&cap->session_caps, &session->s_caps); 569 list_add_tail(&cap->session_caps, &session->s_caps);
570 session->s_nr_caps++; 570 session->s_nr_caps++;
571 spin_unlock(&session->s_cap_lock); 571 spin_unlock(&session->s_cap_lock);
572 } 572 } else if (new_cap)
573 ceph_put_cap(mdsc, new_cap);
573 574
574 if (!ci->i_snap_realm) { 575 if (!ci->i_snap_realm) {
575 /* 576 /*
@@ -2634,6 +2635,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
2634 struct ceph_mds_session *session, 2635 struct ceph_mds_session *session,
2635 int *open_target_sessions) 2636 int *open_target_sessions)
2636{ 2637{
2638 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
2637 struct ceph_inode_info *ci = ceph_inode(inode); 2639 struct ceph_inode_info *ci = ceph_inode(inode);
2638 int mds = session->s_mds; 2640 int mds = session->s_mds;
2639 unsigned mseq = le32_to_cpu(ex->migrate_seq); 2641 unsigned mseq = le32_to_cpu(ex->migrate_seq);
@@ -2670,6 +2672,19 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
2670 * export targets, so that we get the matching IMPORT 2672 * export targets, so that we get the matching IMPORT
2671 */ 2673 */
2672 *open_target_sessions = 1; 2674 *open_target_sessions = 1;
2675
2676 /*
2677 * we can't flush dirty caps that we've seen the
2678 * EXPORT but no IMPORT for
2679 */
2680 spin_lock(&mdsc->cap_dirty_lock);
2681 if (!list_empty(&ci->i_dirty_item)) {
2682 dout(" moving %p to cap_dirty_migrating\n",
2683 inode);
2684 list_move(&ci->i_dirty_item,
2685 &mdsc->cap_dirty_migrating);
2686 }
2687 spin_unlock(&mdsc->cap_dirty_lock);
2673 } 2688 }
2674 __ceph_remove_cap(cap); 2689 __ceph_remove_cap(cap);
2675 } 2690 }
@@ -2707,6 +2722,13 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
2707 ci->i_cap_exporting_issued = 0; 2722 ci->i_cap_exporting_issued = 0;
2708 ci->i_cap_exporting_mseq = 0; 2723 ci->i_cap_exporting_mseq = 0;
2709 ci->i_cap_exporting_mds = -1; 2724 ci->i_cap_exporting_mds = -1;
2725
2726 spin_lock(&mdsc->cap_dirty_lock);
2727 if (!list_empty(&ci->i_dirty_item)) {
2728 dout(" moving %p back to cap_dirty\n", inode);
2729 list_move(&ci->i_dirty_item, &mdsc->cap_dirty);
2730 }
2731 spin_unlock(&mdsc->cap_dirty_lock);
2710 } else { 2732 } else {
2711 dout("handle_cap_import inode %p ci %p mds%d mseq %d\n", 2733 dout("handle_cap_import inode %p ci %p mds%d mseq %d\n",
2712 inode, ci, mds, mseq); 2734 inode, ci, mds, mseq);
@@ -2910,38 +2932,16 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
2910 */ 2932 */
2911void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc) 2933void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
2912{ 2934{
2913 struct ceph_inode_info *ci, *nci = NULL; 2935 struct ceph_inode_info *ci;
2914 struct inode *inode, *ninode = NULL; 2936 struct inode *inode;
2915 struct list_head *p, *n;
2916 2937
2917 dout("flush_dirty_caps\n"); 2938 dout("flush_dirty_caps\n");
2918 spin_lock(&mdsc->cap_dirty_lock); 2939 spin_lock(&mdsc->cap_dirty_lock);
2919 list_for_each_safe(p, n, &mdsc->cap_dirty) { 2940 while (!list_empty(&mdsc->cap_dirty)) {
2920 if (nci) { 2941 ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
2921 ci = nci; 2942 i_dirty_item);
2922 inode = ninode; 2943 inode = igrab(&ci->vfs_inode);
2923 ci->i_ceph_flags &= ~CEPH_I_NOFLUSH; 2944 dout("flush_dirty_caps %p\n", inode);
2924 dout("flush_dirty_caps inode %p (was next inode)\n",
2925 inode);
2926 } else {
2927 ci = list_entry(p, struct ceph_inode_info,
2928 i_dirty_item);
2929 inode = igrab(&ci->vfs_inode);
2930 BUG_ON(!inode);
2931 dout("flush_dirty_caps inode %p\n", inode);
2932 }
2933 if (n != &mdsc->cap_dirty) {
2934 nci = list_entry(n, struct ceph_inode_info,
2935 i_dirty_item);
2936 ninode = igrab(&nci->vfs_inode);
2937 BUG_ON(!ninode);
2938 nci->i_ceph_flags |= CEPH_I_NOFLUSH;
2939 dout("flush_dirty_caps next inode %p, noflush\n",
2940 ninode);
2941 } else {
2942 nci = NULL;
2943 ninode = NULL;
2944 }
2945 spin_unlock(&mdsc->cap_dirty_lock); 2945 spin_unlock(&mdsc->cap_dirty_lock);
2946 if (inode) { 2946 if (inode) {
2947 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, 2947 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH,
@@ -2951,6 +2951,7 @@ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
2951 spin_lock(&mdsc->cap_dirty_lock); 2951 spin_lock(&mdsc->cap_dirty_lock);
2952 } 2952 }
2953 spin_unlock(&mdsc->cap_dirty_lock); 2953 spin_unlock(&mdsc->cap_dirty_lock);
2954 dout("flush_dirty_caps done\n");
2954} 2955}
2955 2956
2956/* 2957/*
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 1a867a3601ae..33729e822bb9 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -360,7 +360,7 @@ more:
360 rinfo = &fi->last_readdir->r_reply_info; 360 rinfo = &fi->last_readdir->r_reply_info;
361 dout("readdir frag %x num %d off %d chunkoff %d\n", frag, 361 dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
362 rinfo->dir_nr, off, fi->offset); 362 rinfo->dir_nr, off, fi->offset);
363 while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) { 363 while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
364 u64 pos = ceph_make_fpos(frag, off); 364 u64 pos = ceph_make_fpos(frag, off);
365 struct ceph_mds_reply_inode *in = 365 struct ceph_mds_reply_inode *in =
366 rinfo->dir_in[off - fi->offset].in; 366 rinfo->dir_in[off - fi->offset].in;
@@ -1066,16 +1066,17 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
1066 struct inode *inode = file->f_dentry->d_inode; 1066 struct inode *inode = file->f_dentry->d_inode;
1067 struct ceph_inode_info *ci = ceph_inode(inode); 1067 struct ceph_inode_info *ci = ceph_inode(inode);
1068 int left; 1068 int left;
1069 const int bufsize = 1024;
1069 1070
1070 if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT)) 1071 if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
1071 return -EISDIR; 1072 return -EISDIR;
1072 1073
1073 if (!cf->dir_info) { 1074 if (!cf->dir_info) {
1074 cf->dir_info = kmalloc(1024, GFP_NOFS); 1075 cf->dir_info = kmalloc(bufsize, GFP_NOFS);
1075 if (!cf->dir_info) 1076 if (!cf->dir_info)
1076 return -ENOMEM; 1077 return -ENOMEM;
1077 cf->dir_info_len = 1078 cf->dir_info_len =
1078 sprintf(cf->dir_info, 1079 snprintf(cf->dir_info, bufsize,
1079 "entries: %20lld\n" 1080 "entries: %20lld\n"
1080 " files: %20lld\n" 1081 " files: %20lld\n"
1081 " subdirs: %20lld\n" 1082 " subdirs: %20lld\n"
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index e41056174bf8..a610d3d67488 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -86,6 +86,7 @@ static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len,
86static struct dentry *__fh_to_dentry(struct super_block *sb, 86static struct dentry *__fh_to_dentry(struct super_block *sb,
87 struct ceph_nfs_fh *fh) 87 struct ceph_nfs_fh *fh)
88{ 88{
89 struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
89 struct inode *inode; 90 struct inode *inode;
90 struct dentry *dentry; 91 struct dentry *dentry;
91 struct ceph_vino vino; 92 struct ceph_vino vino;
@@ -95,8 +96,24 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
95 vino.ino = fh->ino; 96 vino.ino = fh->ino;
96 vino.snap = CEPH_NOSNAP; 97 vino.snap = CEPH_NOSNAP;
97 inode = ceph_find_inode(sb, vino); 98 inode = ceph_find_inode(sb, vino);
98 if (!inode) 99 if (!inode) {
99 return ERR_PTR(-ESTALE); 100 struct ceph_mds_request *req;
101
102 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPINO,
103 USE_ANY_MDS);
104 if (IS_ERR(req))
105 return ERR_CAST(req);
106
107 req->r_ino1 = vino;
108 req->r_num_caps = 1;
109 err = ceph_mdsc_do_request(mdsc, NULL, req);
110 inode = req->r_target_inode;
111 if (inode)
112 igrab(inode);
113 ceph_mdsc_put_request(req);
114 if (!inode)
115 return ERR_PTR(-ESTALE);
116 }
100 117
101 dentry = d_obtain_alias(inode); 118 dentry = d_obtain_alias(inode);
102 if (IS_ERR(dentry)) { 119 if (IS_ERR(dentry)) {
@@ -148,8 +165,10 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb,
148 snprintf(req->r_path2, 16, "%d", cfh->parent_name_hash); 165 snprintf(req->r_path2, 16, "%d", cfh->parent_name_hash);
149 req->r_num_caps = 1; 166 req->r_num_caps = 1;
150 err = ceph_mdsc_do_request(mdsc, NULL, req); 167 err = ceph_mdsc_do_request(mdsc, NULL, req);
168 inode = req->r_target_inode;
169 if (inode)
170 igrab(inode);
151 ceph_mdsc_put_request(req); 171 ceph_mdsc_put_request(req);
152 inode = ceph_find_inode(sb, vino);
153 if (!inode) 172 if (!inode)
154 return ERR_PTR(err ? err : -ESTALE); 173 return ERR_PTR(err ? err : -ESTALE);
155 } 174 }
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index d0fae4ce9ba5..79743d146be6 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -578,6 +578,7 @@ static void __register_request(struct ceph_mds_client *mdsc,
578 if (dir) { 578 if (dir) {
579 struct ceph_inode_info *ci = ceph_inode(dir); 579 struct ceph_inode_info *ci = ceph_inode(dir);
580 580
581 ihold(dir);
581 spin_lock(&ci->i_unsafe_lock); 582 spin_lock(&ci->i_unsafe_lock);
582 req->r_unsafe_dir = dir; 583 req->r_unsafe_dir = dir;
583 list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops); 584 list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
@@ -598,6 +599,9 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
598 spin_lock(&ci->i_unsafe_lock); 599 spin_lock(&ci->i_unsafe_lock);
599 list_del_init(&req->r_unsafe_dir_item); 600 list_del_init(&req->r_unsafe_dir_item);
600 spin_unlock(&ci->i_unsafe_lock); 601 spin_unlock(&ci->i_unsafe_lock);
602
603 iput(req->r_unsafe_dir);
604 req->r_unsafe_dir = NULL;
601 } 605 }
602 606
603 ceph_mdsc_put_request(req); 607 ceph_mdsc_put_request(req);
@@ -2691,7 +2695,6 @@ static void handle_lease(struct ceph_mds_client *mdsc,
2691{ 2695{
2692 struct super_block *sb = mdsc->fsc->sb; 2696 struct super_block *sb = mdsc->fsc->sb;
2693 struct inode *inode; 2697 struct inode *inode;
2694 struct ceph_inode_info *ci;
2695 struct dentry *parent, *dentry; 2698 struct dentry *parent, *dentry;
2696 struct ceph_dentry_info *di; 2699 struct ceph_dentry_info *di;
2697 int mds = session->s_mds; 2700 int mds = session->s_mds;
@@ -2728,7 +2731,6 @@ static void handle_lease(struct ceph_mds_client *mdsc,
2728 dout("handle_lease no inode %llx\n", vino.ino); 2731 dout("handle_lease no inode %llx\n", vino.ino);
2729 goto release; 2732 goto release;
2730 } 2733 }
2731 ci = ceph_inode(inode);
2732 2734
2733 /* dentry */ 2735 /* dentry */
2734 parent = d_find_alias(inode); 2736 parent = d_find_alias(inode);
@@ -3002,6 +3004,7 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
3002 spin_lock_init(&mdsc->snap_flush_lock); 3004 spin_lock_init(&mdsc->snap_flush_lock);
3003 mdsc->cap_flush_seq = 0; 3005 mdsc->cap_flush_seq = 0;
3004 INIT_LIST_HEAD(&mdsc->cap_dirty); 3006 INIT_LIST_HEAD(&mdsc->cap_dirty);
3007 INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
3005 mdsc->num_cap_flushing = 0; 3008 mdsc->num_cap_flushing = 0;
3006 spin_lock_init(&mdsc->cap_dirty_lock); 3009 spin_lock_init(&mdsc->cap_dirty_lock);
3007 init_waitqueue_head(&mdsc->cap_flushing_wq); 3010 init_waitqueue_head(&mdsc->cap_flushing_wq);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 4e3a9cc0bba6..7d8a0d662d56 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -278,6 +278,7 @@ struct ceph_mds_client {
278 278
279 u64 cap_flush_seq; 279 u64 cap_flush_seq;
280 struct list_head cap_dirty; /* inodes with dirty caps */ 280 struct list_head cap_dirty; /* inodes with dirty caps */
281 struct list_head cap_dirty_migrating; /* ...that are migration... */
281 int num_cap_flushing; /* # caps we are flushing */ 282 int num_cap_flushing; /* # caps we are flushing */
282 spinlock_t cap_dirty_lock; /* protects above items */ 283 spinlock_t cap_dirty_lock; /* protects above items */
283 wait_queue_head_t cap_flushing_wq; 284 wait_queue_head_t cap_flushing_wq;
diff --git a/fs/dcache.c b/fs/dcache.c
index 18b2a1f10ed8..37f72ee5bf7c 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1220,7 +1220,7 @@ void shrink_dcache_parent(struct dentry * parent)
1220EXPORT_SYMBOL(shrink_dcache_parent); 1220EXPORT_SYMBOL(shrink_dcache_parent);
1221 1221
1222/* 1222/*
1223 * Scan `nr' dentries and return the number which remain. 1223 * Scan `sc->nr_slab_to_reclaim' dentries and return the number which remain.
1224 * 1224 *
1225 * We need to avoid reentering the filesystem if the caller is performing a 1225 * We need to avoid reentering the filesystem if the caller is performing a
1226 * GFP_NOFS allocation attempt. One example deadlock is: 1226 * GFP_NOFS allocation attempt. One example deadlock is:
@@ -1231,8 +1231,12 @@ EXPORT_SYMBOL(shrink_dcache_parent);
1231 * 1231 *
1232 * In this case we return -1 to tell the caller that we baled. 1232 * In this case we return -1 to tell the caller that we baled.
1233 */ 1233 */
1234static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) 1234static int shrink_dcache_memory(struct shrinker *shrink,
1235 struct shrink_control *sc)
1235{ 1236{
1237 int nr = sc->nr_to_scan;
1238 gfp_t gfp_mask = sc->gfp_mask;
1239
1236 if (nr) { 1240 if (nr) {
1237 if (!(gfp_mask & __GFP_FS)) 1241 if (!(gfp_mask & __GFP_FS))
1238 return -1; 1242 return -1;
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 98b77c89494c..c00e055b6282 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -40,9 +40,12 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
40static void drop_slab(void) 40static void drop_slab(void)
41{ 41{
42 int nr_objects; 42 int nr_objects;
43 struct shrink_control shrink = {
44 .gfp_mask = GFP_KERNEL,
45 };
43 46
44 do { 47 do {
45 nr_objects = shrink_slab(1000, GFP_KERNEL, 1000); 48 nr_objects = shrink_slab(&shrink, 1000, 1000);
46 } while (nr_objects > 10); 49 } while (nr_objects > 10);
47} 50}
48 51
diff --git a/fs/exec.c b/fs/exec.c
index c1cf372f17a7..936f5776655c 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -200,7 +200,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
200 200
201#ifdef CONFIG_STACK_GROWSUP 201#ifdef CONFIG_STACK_GROWSUP
202 if (write) { 202 if (write) {
203 ret = expand_stack_downwards(bprm->vma, pos); 203 ret = expand_downwards(bprm->vma, pos);
204 if (ret < 0) 204 if (ret < 0)
205 return NULL; 205 return NULL;
206 } 206 }
@@ -600,7 +600,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
600 unsigned long length = old_end - old_start; 600 unsigned long length = old_end - old_start;
601 unsigned long new_start = old_start - shift; 601 unsigned long new_start = old_start - shift;
602 unsigned long new_end = old_end - shift; 602 unsigned long new_end = old_end - shift;
603 struct mmu_gather *tlb; 603 struct mmu_gather tlb;
604 604
605 BUG_ON(new_start > new_end); 605 BUG_ON(new_start > new_end);
606 606
@@ -626,12 +626,12 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
626 return -ENOMEM; 626 return -ENOMEM;
627 627
628 lru_add_drain(); 628 lru_add_drain();
629 tlb = tlb_gather_mmu(mm, 0); 629 tlb_gather_mmu(&tlb, mm, 0);
630 if (new_end > old_start) { 630 if (new_end > old_start) {
631 /* 631 /*
632 * when the old and new regions overlap clear from new_end. 632 * when the old and new regions overlap clear from new_end.
633 */ 633 */
634 free_pgd_range(tlb, new_end, old_end, new_end, 634 free_pgd_range(&tlb, new_end, old_end, new_end,
635 vma->vm_next ? vma->vm_next->vm_start : 0); 635 vma->vm_next ? vma->vm_next->vm_start : 0);
636 } else { 636 } else {
637 /* 637 /*
@@ -640,10 +640,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
640 * have constraints on va-space that make this illegal (IA64) - 640 * have constraints on va-space that make this illegal (IA64) -
641 * for the others its just a little faster. 641 * for the others its just a little faster.
642 */ 642 */
643 free_pgd_range(tlb, old_start, old_end, new_end, 643 free_pgd_range(&tlb, old_start, old_end, new_end,
644 vma->vm_next ? vma->vm_next->vm_start : 0); 644 vma->vm_next ? vma->vm_next->vm_start : 0);
645 } 645 }
646 tlb_finish_mmu(tlb, new_end, old_end); 646 tlb_finish_mmu(&tlb, new_end, old_end);
647 647
648 /* 648 /*
649 * Shrink the vma to just the new range. Always succeeds. 649 * Shrink the vma to just the new range. Always succeeds.
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index 48a18f184d50..30afdfa7aec7 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -33,8 +33,6 @@ void fscache_enqueue_operation(struct fscache_operation *op)
33 _enter("{OBJ%x OP%x,%u}", 33 _enter("{OBJ%x OP%x,%u}",
34 op->object->debug_id, op->debug_id, atomic_read(&op->usage)); 34 op->object->debug_id, op->debug_id, atomic_read(&op->usage));
35 35
36 fscache_set_op_state(op, "EnQ");
37
38 ASSERT(list_empty(&op->pend_link)); 36 ASSERT(list_empty(&op->pend_link));
39 ASSERT(op->processor != NULL); 37 ASSERT(op->processor != NULL);
40 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE); 38 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
@@ -66,8 +64,6 @@ EXPORT_SYMBOL(fscache_enqueue_operation);
66static void fscache_run_op(struct fscache_object *object, 64static void fscache_run_op(struct fscache_object *object,
67 struct fscache_operation *op) 65 struct fscache_operation *op)
68{ 66{
69 fscache_set_op_state(op, "Run");
70
71 object->n_in_progress++; 67 object->n_in_progress++;
72 if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) 68 if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
73 wake_up_bit(&op->flags, FSCACHE_OP_WAITING); 69 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
@@ -88,8 +84,6 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
88 84
89 _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id); 85 _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);
90 86
91 fscache_set_op_state(op, "SubmitX");
92
93 spin_lock(&object->lock); 87 spin_lock(&object->lock);
94 ASSERTCMP(object->n_ops, >=, object->n_in_progress); 88 ASSERTCMP(object->n_ops, >=, object->n_in_progress);
95 ASSERTCMP(object->n_ops, >=, object->n_exclusive); 89 ASSERTCMP(object->n_ops, >=, object->n_exclusive);
@@ -194,8 +188,6 @@ int fscache_submit_op(struct fscache_object *object,
194 188
195 ASSERTCMP(atomic_read(&op->usage), >, 0); 189 ASSERTCMP(atomic_read(&op->usage), >, 0);
196 190
197 fscache_set_op_state(op, "Submit");
198
199 spin_lock(&object->lock); 191 spin_lock(&object->lock);
200 ASSERTCMP(object->n_ops, >=, object->n_in_progress); 192 ASSERTCMP(object->n_ops, >=, object->n_in_progress);
201 ASSERTCMP(object->n_ops, >=, object->n_exclusive); 193 ASSERTCMP(object->n_ops, >=, object->n_exclusive);
@@ -335,8 +327,6 @@ void fscache_put_operation(struct fscache_operation *op)
335 if (!atomic_dec_and_test(&op->usage)) 327 if (!atomic_dec_and_test(&op->usage))
336 return; 328 return;
337 329
338 fscache_set_op_state(op, "Put");
339
340 _debug("PUT OP"); 330 _debug("PUT OP");
341 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags)) 331 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
342 BUG(); 332 BUG();
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 41c441c2058d..a2a5d19ece6a 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -155,11 +155,9 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
155 fscache_stat(&fscache_n_attr_changed_calls); 155 fscache_stat(&fscache_n_attr_changed_calls);
156 156
157 if (fscache_object_is_active(object)) { 157 if (fscache_object_is_active(object)) {
158 fscache_set_op_state(op, "CallFS");
159 fscache_stat(&fscache_n_cop_attr_changed); 158 fscache_stat(&fscache_n_cop_attr_changed);
160 ret = object->cache->ops->attr_changed(object); 159 ret = object->cache->ops->attr_changed(object);
161 fscache_stat_d(&fscache_n_cop_attr_changed); 160 fscache_stat_d(&fscache_n_cop_attr_changed);
162 fscache_set_op_state(op, "Done");
163 if (ret < 0) 161 if (ret < 0)
164 fscache_abort_object(object); 162 fscache_abort_object(object);
165 } 163 }
@@ -190,7 +188,6 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
190 188
191 fscache_operation_init(op, fscache_attr_changed_op, NULL); 189 fscache_operation_init(op, fscache_attr_changed_op, NULL);
192 op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE); 190 op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);
193 fscache_set_op_name(op, "Attr");
194 191
195 spin_lock(&cookie->lock); 192 spin_lock(&cookie->lock);
196 193
@@ -257,7 +254,6 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
257 op->context = context; 254 op->context = context;
258 op->start_time = jiffies; 255 op->start_time = jiffies;
259 INIT_LIST_HEAD(&op->to_do); 256 INIT_LIST_HEAD(&op->to_do);
260 fscache_set_op_name(&op->op, "Retr");
261 return op; 257 return op;
262} 258}
263 259
@@ -368,7 +364,6 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
368 _leave(" = -ENOMEM"); 364 _leave(" = -ENOMEM");
369 return -ENOMEM; 365 return -ENOMEM;
370 } 366 }
371 fscache_set_op_name(&op->op, "RetrRA1");
372 367
373 spin_lock(&cookie->lock); 368 spin_lock(&cookie->lock);
374 369
@@ -487,7 +482,6 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
487 op = fscache_alloc_retrieval(mapping, end_io_func, context); 482 op = fscache_alloc_retrieval(mapping, end_io_func, context);
488 if (!op) 483 if (!op)
489 return -ENOMEM; 484 return -ENOMEM;
490 fscache_set_op_name(&op->op, "RetrRAN");
491 485
492 spin_lock(&cookie->lock); 486 spin_lock(&cookie->lock);
493 487
@@ -589,7 +583,6 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
589 op = fscache_alloc_retrieval(page->mapping, NULL, NULL); 583 op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
590 if (!op) 584 if (!op)
591 return -ENOMEM; 585 return -ENOMEM;
592 fscache_set_op_name(&op->op, "RetrAL1");
593 586
594 spin_lock(&cookie->lock); 587 spin_lock(&cookie->lock);
595 588
@@ -662,8 +655,6 @@ static void fscache_write_op(struct fscache_operation *_op)
662 655
663 _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage)); 656 _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
664 657
665 fscache_set_op_state(&op->op, "GetPage");
666
667 spin_lock(&object->lock); 658 spin_lock(&object->lock);
668 cookie = object->cookie; 659 cookie = object->cookie;
669 660
@@ -698,15 +689,12 @@ static void fscache_write_op(struct fscache_operation *_op)
698 spin_unlock(&cookie->stores_lock); 689 spin_unlock(&cookie->stores_lock);
699 spin_unlock(&object->lock); 690 spin_unlock(&object->lock);
700 691
701 fscache_set_op_state(&op->op, "Store");
702 fscache_stat(&fscache_n_store_pages); 692 fscache_stat(&fscache_n_store_pages);
703 fscache_stat(&fscache_n_cop_write_page); 693 fscache_stat(&fscache_n_cop_write_page);
704 ret = object->cache->ops->write_page(op, page); 694 ret = object->cache->ops->write_page(op, page);
705 fscache_stat_d(&fscache_n_cop_write_page); 695 fscache_stat_d(&fscache_n_cop_write_page);
706 fscache_set_op_state(&op->op, "EndWrite");
707 fscache_end_page_write(object, page); 696 fscache_end_page_write(object, page);
708 if (ret < 0) { 697 if (ret < 0) {
709 fscache_set_op_state(&op->op, "Abort");
710 fscache_abort_object(object); 698 fscache_abort_object(object);
711 } else { 699 } else {
712 fscache_enqueue_operation(&op->op); 700 fscache_enqueue_operation(&op->op);
@@ -778,7 +766,6 @@ int __fscache_write_page(struct fscache_cookie *cookie,
778 fscache_operation_init(&op->op, fscache_write_op, 766 fscache_operation_init(&op->op, fscache_write_op,
779 fscache_release_write_op); 767 fscache_release_write_op);
780 op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING); 768 op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);
781 fscache_set_op_name(&op->op, "Write1");
782 769
783 ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM); 770 ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
784 if (ret < 0) 771 if (ret < 0)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index a2a6abbccc07..2792a790e50b 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1346,11 +1346,14 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
1346} 1346}
1347 1347
1348 1348
1349static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) 1349static int gfs2_shrink_glock_memory(struct shrinker *shrink,
1350 struct shrink_control *sc)
1350{ 1351{
1351 struct gfs2_glock *gl; 1352 struct gfs2_glock *gl;
1352 int may_demote; 1353 int may_demote;
1353 int nr_skipped = 0; 1354 int nr_skipped = 0;
1355 int nr = sc->nr_to_scan;
1356 gfp_t gfp_mask = sc->gfp_mask;
1354 LIST_HEAD(skipped); 1357 LIST_HEAD(skipped);
1355 1358
1356 if (nr == 0) 1359 if (nr == 0)
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index e23d9864c418..42e8d23bc047 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -38,6 +38,7 @@
38 38
39#include <linux/sched.h> 39#include <linux/sched.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/mm.h>
41#include <linux/spinlock.h> 42#include <linux/spinlock.h>
42#include <linux/completion.h> 43#include <linux/completion.h>
43#include <linux/buffer_head.h> 44#include <linux/buffer_head.h>
@@ -77,19 +78,20 @@ static LIST_HEAD(qd_lru_list);
77static atomic_t qd_lru_count = ATOMIC_INIT(0); 78static atomic_t qd_lru_count = ATOMIC_INIT(0);
78static DEFINE_SPINLOCK(qd_lru_lock); 79static DEFINE_SPINLOCK(qd_lru_lock);
79 80
80int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) 81int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
81{ 82{
82 struct gfs2_quota_data *qd; 83 struct gfs2_quota_data *qd;
83 struct gfs2_sbd *sdp; 84 struct gfs2_sbd *sdp;
85 int nr_to_scan = sc->nr_to_scan;
84 86
85 if (nr == 0) 87 if (nr_to_scan == 0)
86 goto out; 88 goto out;
87 89
88 if (!(gfp_mask & __GFP_FS)) 90 if (!(sc->gfp_mask & __GFP_FS))
89 return -1; 91 return -1;
90 92
91 spin_lock(&qd_lru_lock); 93 spin_lock(&qd_lru_lock);
92 while (nr && !list_empty(&qd_lru_list)) { 94 while (nr_to_scan && !list_empty(&qd_lru_list)) {
93 qd = list_entry(qd_lru_list.next, 95 qd = list_entry(qd_lru_list.next,
94 struct gfs2_quota_data, qd_reclaim); 96 struct gfs2_quota_data, qd_reclaim);
95 sdp = qd->qd_gl->gl_sbd; 97 sdp = qd->qd_gl->gl_sbd;
@@ -110,7 +112,7 @@ int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
110 spin_unlock(&qd_lru_lock); 112 spin_unlock(&qd_lru_lock);
111 kmem_cache_free(gfs2_quotad_cachep, qd); 113 kmem_cache_free(gfs2_quotad_cachep, qd);
112 spin_lock(&qd_lru_lock); 114 spin_lock(&qd_lru_lock);
113 nr--; 115 nr_to_scan--;
114 } 116 }
115 spin_unlock(&qd_lru_lock); 117 spin_unlock(&qd_lru_lock);
116 118
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index e7d236ca48bd..90bf1c302a98 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -12,6 +12,7 @@
12 12
13struct gfs2_inode; 13struct gfs2_inode;
14struct gfs2_sbd; 14struct gfs2_sbd;
15struct shrink_control;
15 16
16#define NO_QUOTA_CHANGE ((u32)-1) 17#define NO_QUOTA_CHANGE ((u32)-1)
17 18
@@ -51,7 +52,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
51 return ret; 52 return ret;
52} 53}
53 54
54extern int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask); 55extern int gfs2_shrink_qd_memory(struct shrinker *shrink,
56 struct shrink_control *sc);
55extern const struct quotactl_ops gfs2_quotactl_ops; 57extern const struct quotactl_ops gfs2_quotactl_ops;
56 58
57#endif /* __QUOTA_DOT_H__ */ 59#endif /* __QUOTA_DOT_H__ */
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index b9eeb1cd03ff..e7a035781b7d 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -412,10 +412,10 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
412 pgoff = offset >> PAGE_SHIFT; 412 pgoff = offset >> PAGE_SHIFT;
413 413
414 i_size_write(inode, offset); 414 i_size_write(inode, offset);
415 spin_lock(&mapping->i_mmap_lock); 415 mutex_lock(&mapping->i_mmap_mutex);
416 if (!prio_tree_empty(&mapping->i_mmap)) 416 if (!prio_tree_empty(&mapping->i_mmap))
417 hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff); 417 hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
418 spin_unlock(&mapping->i_mmap_lock); 418 mutex_unlock(&mapping->i_mmap_mutex);
419 truncate_hugepages(inode, offset); 419 truncate_hugepages(inode, offset);
420 return 0; 420 return 0;
421} 421}
diff --git a/fs/inode.c b/fs/inode.c
index 05f4fa521325..990d284877a1 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -326,12 +326,11 @@ void address_space_init_once(struct address_space *mapping)
326 memset(mapping, 0, sizeof(*mapping)); 326 memset(mapping, 0, sizeof(*mapping));
327 INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); 327 INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
328 spin_lock_init(&mapping->tree_lock); 328 spin_lock_init(&mapping->tree_lock);
329 spin_lock_init(&mapping->i_mmap_lock); 329 mutex_init(&mapping->i_mmap_mutex);
330 INIT_LIST_HEAD(&mapping->private_list); 330 INIT_LIST_HEAD(&mapping->private_list);
331 spin_lock_init(&mapping->private_lock); 331 spin_lock_init(&mapping->private_lock);
332 INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap); 332 INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
333 INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); 333 INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
334 mutex_init(&mapping->unmap_mutex);
335} 334}
336EXPORT_SYMBOL(address_space_init_once); 335EXPORT_SYMBOL(address_space_init_once);
337 336
@@ -752,8 +751,12 @@ static void prune_icache(int nr_to_scan)
752 * This function is passed the number of inodes to scan, and it returns the 751 * This function is passed the number of inodes to scan, and it returns the
753 * total number of remaining possibly-reclaimable inodes. 752 * total number of remaining possibly-reclaimable inodes.
754 */ 753 */
755static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) 754static int shrink_icache_memory(struct shrinker *shrink,
755 struct shrink_control *sc)
756{ 756{
757 int nr = sc->nr_to_scan;
758 gfp_t gfp_mask = sc->gfp_mask;
759
757 if (nr) { 760 if (nr) {
758 /* 761 /*
759 * Nasty deadlock avoidance. We may hold various FS locks, 762 * Nasty deadlock avoidance. We may hold various FS locks,
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 2f174be06555..8c32ef3ba88e 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -90,7 +90,8 @@ static DEFINE_SPINLOCK(mb_cache_spinlock);
90 * What the mbcache registers as to get shrunk dynamically. 90 * What the mbcache registers as to get shrunk dynamically.
91 */ 91 */
92 92
93static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask); 93static int mb_cache_shrink_fn(struct shrinker *shrink,
94 struct shrink_control *sc);
94 95
95static struct shrinker mb_cache_shrinker = { 96static struct shrinker mb_cache_shrinker = {
96 .shrink = mb_cache_shrink_fn, 97 .shrink = mb_cache_shrink_fn,
@@ -156,18 +157,19 @@ forget:
156 * gets low. 157 * gets low.
157 * 158 *
158 * @shrink: (ignored) 159 * @shrink: (ignored)
159 * @nr_to_scan: Number of objects to scan 160 * @sc: shrink_control passed from reclaim
160 * @gfp_mask: (ignored)
161 * 161 *
162 * Returns the number of objects which are present in the cache. 162 * Returns the number of objects which are present in the cache.
163 */ 163 */
164static int 164static int
165mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) 165mb_cache_shrink_fn(struct shrinker *shrink, struct shrink_control *sc)
166{ 166{
167 LIST_HEAD(free_list); 167 LIST_HEAD(free_list);
168 struct mb_cache *cache; 168 struct mb_cache *cache;
169 struct mb_cache_entry *entry, *tmp; 169 struct mb_cache_entry *entry, *tmp;
170 int count = 0; 170 int count = 0;
171 int nr_to_scan = sc->nr_to_scan;
172 gfp_t gfp_mask = sc->gfp_mask;
171 173
172 mb_debug("trying to free %d entries", nr_to_scan); 174 mb_debug("trying to free %d entries", nr_to_scan);
173 spin_lock(&mb_cache_spinlock); 175 spin_lock(&mb_cache_spinlock);
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 0250e4ce4893..202f370526a7 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -461,7 +461,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
461#endif 461#endif
462 struct ncp_entry_info finfo; 462 struct ncp_entry_info finfo;
463 463
464 data.wdog_pid = NULL; 464 memset(&data, 0, sizeof(data));
465 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL); 465 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
466 if (!server) 466 if (!server)
467 return -ENOMEM; 467 return -ENOMEM;
@@ -496,7 +496,6 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
496 struct ncp_mount_data_v4* md = (struct ncp_mount_data_v4*)raw_data; 496 struct ncp_mount_data_v4* md = (struct ncp_mount_data_v4*)raw_data;
497 497
498 data.flags = md->flags; 498 data.flags = md->flags;
499 data.int_flags = 0;
500 data.mounted_uid = md->mounted_uid; 499 data.mounted_uid = md->mounted_uid;
501 data.wdog_pid = find_get_pid(md->wdog_pid); 500 data.wdog_pid = find_get_pid(md->wdog_pid);
502 data.ncp_fd = md->ncp_fd; 501 data.ncp_fd = md->ncp_fd;
@@ -507,7 +506,6 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
507 data.file_mode = md->file_mode; 506 data.file_mode = md->file_mode;
508 data.dir_mode = md->dir_mode; 507 data.dir_mode = md->dir_mode;
509 data.info_fd = -1; 508 data.info_fd = -1;
510 data.mounted_vol[0] = 0;
511 } 509 }
512 break; 510 break;
513 default: 511 default:
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 7237672216c8..424e47773a84 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2042,11 +2042,14 @@ static void nfs_access_free_list(struct list_head *head)
2042 } 2042 }
2043} 2043}
2044 2044
2045int nfs_access_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) 2045int nfs_access_cache_shrinker(struct shrinker *shrink,
2046 struct shrink_control *sc)
2046{ 2047{
2047 LIST_HEAD(head); 2048 LIST_HEAD(head);
2048 struct nfs_inode *nfsi, *next; 2049 struct nfs_inode *nfsi, *next;
2049 struct nfs_access_entry *cache; 2050 struct nfs_access_entry *cache;
2051 int nr_to_scan = sc->nr_to_scan;
2052 gfp_t gfp_mask = sc->gfp_mask;
2050 2053
2051 if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL) 2054 if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
2052 return (nr_to_scan == 0) ? 0 : -1; 2055 return (nr_to_scan == 0) ? 0 : -1;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index ce118ce885dd..2df6ca7b5898 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -234,7 +234,7 @@ extern int nfs_init_client(struct nfs_client *clp,
234 234
235/* dir.c */ 235/* dir.c */
236extern int nfs_access_cache_shrinker(struct shrinker *shrink, 236extern int nfs_access_cache_shrinker(struct shrinker *shrink,
237 int nr_to_scan, gfp_t gfp_mask); 237 struct shrink_control *sc);
238 238
239/* inode.c */ 239/* inode.c */
240extern struct workqueue_struct *nfsiod_workqueue; 240extern struct workqueue_struct *nfsiod_workqueue;
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index d545e97d99c3..8ed4d3433199 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -255,7 +255,11 @@ ssize_t part_discard_alignment_show(struct device *dev,
255 struct device_attribute *attr, char *buf) 255 struct device_attribute *attr, char *buf)
256{ 256{
257 struct hd_struct *p = dev_to_part(dev); 257 struct hd_struct *p = dev_to_part(dev);
258 return sprintf(buf, "%u\n", p->discard_alignment); 258 struct gendisk *disk = dev_to_disk(dev);
259
260 return sprintf(buf, "%u\n",
261 queue_limit_discard_alignment(&disk->queue->limits,
262 p->start_sect));
259} 263}
260 264
261ssize_t part_stat_show(struct device *dev, 265ssize_t part_stat_show(struct device *dev,
@@ -449,8 +453,6 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
449 p->start_sect = start; 453 p->start_sect = start;
450 p->alignment_offset = 454 p->alignment_offset =
451 queue_limit_alignment_offset(&disk->queue->limits, start); 455 queue_limit_alignment_offset(&disk->queue->limits, start);
452 p->discard_alignment =
453 queue_limit_discard_alignment(&disk->queue->limits, start);
454 p->nr_sects = len; 456 p->nr_sects = len;
455 p->partno = partno; 457 p->partno = partno;
456 p->policy = get_disk_ro(disk); 458 p->policy = get_disk_ro(disk);
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index c03e8d3a3a5b..3763b436e69d 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -61,6 +61,14 @@ extern const struct file_operations proc_pagemap_operations;
61extern const struct file_operations proc_net_operations; 61extern const struct file_operations proc_net_operations;
62extern const struct inode_operations proc_net_inode_operations; 62extern const struct inode_operations proc_net_inode_operations;
63 63
64struct proc_maps_private {
65 struct pid *pid;
66 struct task_struct *task;
67#ifdef CONFIG_MMU
68 struct vm_area_struct *tail_vma;
69#endif
70};
71
64void proc_init_inodecache(void); 72void proc_init_inodecache(void);
65 73
66static inline struct pid *proc_pid(struct inode *inode) 74static inline struct pid *proc_pid(struct inode *inode)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 318d8654989b..2c9db29ea358 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -858,7 +858,192 @@ const struct file_operations proc_pagemap_operations = {
858#endif /* CONFIG_PROC_PAGE_MONITOR */ 858#endif /* CONFIG_PROC_PAGE_MONITOR */
859 859
860#ifdef CONFIG_NUMA 860#ifdef CONFIG_NUMA
861extern int show_numa_map(struct seq_file *m, void *v); 861
862struct numa_maps {
863 struct vm_area_struct *vma;
864 unsigned long pages;
865 unsigned long anon;
866 unsigned long active;
867 unsigned long writeback;
868 unsigned long mapcount_max;
869 unsigned long dirty;
870 unsigned long swapcache;
871 unsigned long node[MAX_NUMNODES];
872};
873
874struct numa_maps_private {
875 struct proc_maps_private proc_maps;
876 struct numa_maps md;
877};
878
879static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty)
880{
881 int count = page_mapcount(page);
882
883 md->pages++;
884 if (pte_dirty || PageDirty(page))
885 md->dirty++;
886
887 if (PageSwapCache(page))
888 md->swapcache++;
889
890 if (PageActive(page) || PageUnevictable(page))
891 md->active++;
892
893 if (PageWriteback(page))
894 md->writeback++;
895
896 if (PageAnon(page))
897 md->anon++;
898
899 if (count > md->mapcount_max)
900 md->mapcount_max = count;
901
902 md->node[page_to_nid(page)]++;
903}
904
905static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
906 unsigned long end, struct mm_walk *walk)
907{
908 struct numa_maps *md;
909 spinlock_t *ptl;
910 pte_t *orig_pte;
911 pte_t *pte;
912
913 md = walk->private;
914 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
915 do {
916 struct page *page;
917 int nid;
918
919 if (!pte_present(*pte))
920 continue;
921
922 page = vm_normal_page(md->vma, addr, *pte);
923 if (!page)
924 continue;
925
926 if (PageReserved(page))
927 continue;
928
929 nid = page_to_nid(page);
930 if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
931 continue;
932
933 gather_stats(page, md, pte_dirty(*pte));
934
935 } while (pte++, addr += PAGE_SIZE, addr != end);
936 pte_unmap_unlock(orig_pte, ptl);
937 return 0;
938}
939#ifdef CONFIG_HUGETLB_PAGE
940static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
941 unsigned long addr, unsigned long end, struct mm_walk *walk)
942{
943 struct numa_maps *md;
944 struct page *page;
945
946 if (pte_none(*pte))
947 return 0;
948
949 page = pte_page(*pte);
950 if (!page)
951 return 0;
952
953 md = walk->private;
954 gather_stats(page, md, pte_dirty(*pte));
955 return 0;
956}
957
958#else
959static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
960 unsigned long addr, unsigned long end, struct mm_walk *walk)
961{
962 return 0;
963}
964#endif
965
966/*
967 * Display pages allocated per node and memory policy via /proc.
968 */
969static int show_numa_map(struct seq_file *m, void *v)
970{
971 struct numa_maps_private *numa_priv = m->private;
972 struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
973 struct vm_area_struct *vma = v;
974 struct numa_maps *md = &numa_priv->md;
975 struct file *file = vma->vm_file;
976 struct mm_struct *mm = vma->vm_mm;
977 struct mm_walk walk = {};
978 struct mempolicy *pol;
979 int n;
980 char buffer[50];
981
982 if (!mm)
983 return 0;
984
985 /* Ensure we start with an empty set of numa_maps statistics. */
986 memset(md, 0, sizeof(*md));
987
988 md->vma = vma;
989
990 walk.hugetlb_entry = gather_hugetbl_stats;
991 walk.pmd_entry = gather_pte_stats;
992 walk.private = md;
993 walk.mm = mm;
994
995 pol = get_vma_policy(proc_priv->task, vma, vma->vm_start);
996 mpol_to_str(buffer, sizeof(buffer), pol, 0);
997 mpol_cond_put(pol);
998
999 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1000
1001 if (file) {
1002 seq_printf(m, " file=");
1003 seq_path(m, &file->f_path, "\n\t= ");
1004 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1005 seq_printf(m, " heap");
1006 } else if (vma->vm_start <= mm->start_stack &&
1007 vma->vm_end >= mm->start_stack) {
1008 seq_printf(m, " stack");
1009 }
1010
1011 walk_page_range(vma->vm_start, vma->vm_end, &walk);
1012
1013 if (!md->pages)
1014 goto out;
1015
1016 if (md->anon)
1017 seq_printf(m, " anon=%lu", md->anon);
1018
1019 if (md->dirty)
1020 seq_printf(m, " dirty=%lu", md->dirty);
1021
1022 if (md->pages != md->anon && md->pages != md->dirty)
1023 seq_printf(m, " mapped=%lu", md->pages);
1024
1025 if (md->mapcount_max > 1)
1026 seq_printf(m, " mapmax=%lu", md->mapcount_max);
1027
1028 if (md->swapcache)
1029 seq_printf(m, " swapcache=%lu", md->swapcache);
1030
1031 if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1032 seq_printf(m, " active=%lu", md->active);
1033
1034 if (md->writeback)
1035 seq_printf(m, " writeback=%lu", md->writeback);
1036
1037 for_each_node_state(n, N_HIGH_MEMORY)
1038 if (md->node[n])
1039 seq_printf(m, " N%d=%lu", n, md->node[n]);
1040out:
1041 seq_putc(m, '\n');
1042
1043 if (m->count < m->size)
1044 m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
1045 return 0;
1046}
862 1047
863static const struct seq_operations proc_pid_numa_maps_op = { 1048static const struct seq_operations proc_pid_numa_maps_op = {
864 .start = m_start, 1049 .start = m_start,
@@ -869,7 +1054,20 @@ static const struct seq_operations proc_pid_numa_maps_op = {
869 1054
870static int numa_maps_open(struct inode *inode, struct file *file) 1055static int numa_maps_open(struct inode *inode, struct file *file)
871{ 1056{
872 return do_maps_open(inode, file, &proc_pid_numa_maps_op); 1057 struct numa_maps_private *priv;
1058 int ret = -ENOMEM;
1059 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1060 if (priv) {
1061 priv->proc_maps.pid = proc_pid(inode);
1062 ret = seq_open(file, &proc_pid_numa_maps_op);
1063 if (!ret) {
1064 struct seq_file *m = file->private_data;
1065 m->private = priv;
1066 } else {
1067 kfree(priv);
1068 }
1069 }
1070 return ret;
873} 1071}
874 1072
875const struct file_operations proc_numa_maps_operations = { 1073const struct file_operations proc_numa_maps_operations = {
@@ -878,4 +1076,4 @@ const struct file_operations proc_numa_maps_operations = {
878 .llseek = seq_lseek, 1076 .llseek = seq_lseek,
879 .release = seq_release_private, 1077 .release = seq_release_private,
880}; 1078};
881#endif 1079#endif /* CONFIG_NUMA */
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index d3c032f5fa0a..5b572c89e6c4 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -691,8 +691,11 @@ static void prune_dqcache(int count)
691 * This is called from kswapd when we think we need some 691 * This is called from kswapd when we think we need some
692 * more memory 692 * more memory
693 */ 693 */
694static int shrink_dqcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) 694static int shrink_dqcache_memory(struct shrinker *shrink,
695 struct shrink_control *sc)
695{ 696{
697 int nr = sc->nr_to_scan;
698
696 if (nr) { 699 if (nr) {
697 spin_lock(&dq_list_lock); 700 spin_lock(&dq_list_lock);
698 prune_dqcache(nr); 701 prune_dqcache(nr);
diff --git a/fs/splice.c b/fs/splice.c
index 50a5d978da16..aa866d309695 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -162,6 +162,14 @@ static const struct pipe_buf_operations user_page_pipe_buf_ops = {
162 .get = generic_pipe_buf_get, 162 .get = generic_pipe_buf_get,
163}; 163};
164 164
165static void wakeup_pipe_readers(struct pipe_inode_info *pipe)
166{
167 smp_mb();
168 if (waitqueue_active(&pipe->wait))
169 wake_up_interruptible(&pipe->wait);
170 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
171}
172
165/** 173/**
166 * splice_to_pipe - fill passed data into a pipe 174 * splice_to_pipe - fill passed data into a pipe
167 * @pipe: pipe to fill 175 * @pipe: pipe to fill
@@ -247,12 +255,8 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
247 255
248 pipe_unlock(pipe); 256 pipe_unlock(pipe);
249 257
250 if (do_wakeup) { 258 if (do_wakeup)
251 smp_mb(); 259 wakeup_pipe_readers(pipe);
252 if (waitqueue_active(&pipe->wait))
253 wake_up_interruptible(&pipe->wait);
254 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
255 }
256 260
257 while (page_nr < spd_pages) 261 while (page_nr < spd_pages)
258 spd->spd_release(spd, page_nr++); 262 spd->spd_release(spd, page_nr++);
@@ -1892,12 +1896,9 @@ retry:
1892 /* 1896 /*
1893 * If we put data in the output pipe, wakeup any potential readers. 1897 * If we put data in the output pipe, wakeup any potential readers.
1894 */ 1898 */
1895 if (ret > 0) { 1899 if (ret > 0)
1896 smp_mb(); 1900 wakeup_pipe_readers(opipe);
1897 if (waitqueue_active(&opipe->wait)) 1901
1898 wake_up_interruptible(&opipe->wait);
1899 kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
1900 }
1901 if (input_wakeup) 1902 if (input_wakeup)
1902 wakeup_pipe_writers(ipipe); 1903 wakeup_pipe_writers(ipipe);
1903 1904
@@ -1976,12 +1977,8 @@ static int link_pipe(struct pipe_inode_info *ipipe,
1976 /* 1977 /*
1977 * If we put data in the output pipe, wakeup any potential readers. 1978 * If we put data in the output pipe, wakeup any potential readers.
1978 */ 1979 */
1979 if (ret > 0) { 1980 if (ret > 0)
1980 smp_mb(); 1981 wakeup_pipe_readers(opipe);
1981 if (waitqueue_active(&opipe->wait))
1982 wake_up_interruptible(&opipe->wait);
1983 kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
1984 }
1985 1982
1986 return ret; 1983 return ret;
1987} 1984}
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 52b2b5da566e..5e68099db2a5 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1422,12 +1422,12 @@ restart:
1422int 1422int
1423xfs_buftarg_shrink( 1423xfs_buftarg_shrink(
1424 struct shrinker *shrink, 1424 struct shrinker *shrink,
1425 int nr_to_scan, 1425 struct shrink_control *sc)
1426 gfp_t mask)
1427{ 1426{
1428 struct xfs_buftarg *btp = container_of(shrink, 1427 struct xfs_buftarg *btp = container_of(shrink,
1429 struct xfs_buftarg, bt_shrinker); 1428 struct xfs_buftarg, bt_shrinker);
1430 struct xfs_buf *bp; 1429 struct xfs_buf *bp;
1430 int nr_to_scan = sc->nr_to_scan;
1431 LIST_HEAD(dispose); 1431 LIST_HEAD(dispose);
1432 1432
1433 if (!nr_to_scan) 1433 if (!nr_to_scan)
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index cb1bb2080e44..8ecad5ff9f9b 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -1032,13 +1032,14 @@ xfs_reclaim_inodes(
1032static int 1032static int
1033xfs_reclaim_inode_shrink( 1033xfs_reclaim_inode_shrink(
1034 struct shrinker *shrink, 1034 struct shrinker *shrink,
1035 int nr_to_scan, 1035 struct shrink_control *sc)
1036 gfp_t gfp_mask)
1037{ 1036{
1038 struct xfs_mount *mp; 1037 struct xfs_mount *mp;
1039 struct xfs_perag *pag; 1038 struct xfs_perag *pag;
1040 xfs_agnumber_t ag; 1039 xfs_agnumber_t ag;
1041 int reclaimable; 1040 int reclaimable;
1041 int nr_to_scan = sc->nr_to_scan;
1042 gfp_t gfp_mask = sc->gfp_mask;
1042 1043
1043 mp = container_of(shrink, struct xfs_mount, m_inode_shrink); 1044 mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
1044 if (nr_to_scan) { 1045 if (nr_to_scan) {
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 69228aa8605a..b94dace4e785 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -60,7 +60,7 @@ STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
60 60
61STATIC int xfs_qm_init_quotainos(xfs_mount_t *); 61STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
62STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); 62STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
63STATIC int xfs_qm_shake(struct shrinker *, int, gfp_t); 63STATIC int xfs_qm_shake(struct shrinker *, struct shrink_control *);
64 64
65static struct shrinker xfs_qm_shaker = { 65static struct shrinker xfs_qm_shaker = {
66 .shrink = xfs_qm_shake, 66 .shrink = xfs_qm_shake,
@@ -2009,10 +2009,10 @@ xfs_qm_shake_freelist(
2009STATIC int 2009STATIC int
2010xfs_qm_shake( 2010xfs_qm_shake(
2011 struct shrinker *shrink, 2011 struct shrinker *shrink,
2012 int nr_to_scan, 2012 struct shrink_control *sc)
2013 gfp_t gfp_mask)
2014{ 2013{
2015 int ndqused, nfree, n; 2014 int ndqused, nfree, n;
2015 gfp_t gfp_mask = sc->gfp_mask;
2016 2016
2017 if (!kmem_shake_allow(gfp_mask)) 2017 if (!kmem_shake_allow(gfp_mask))
2018 return 0; 2018 return 0;
diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h
index bcbab3e4a3be..89b73e5d0fd0 100644
--- a/include/asm-generic/audit_change_attr.h
+++ b/include/asm-generic/audit_change_attr.h
@@ -1,4 +1,6 @@
1#ifdef __NR_chmod
1__NR_chmod, 2__NR_chmod,
3#endif
2__NR_fchmod, 4__NR_fchmod,
3#ifdef __NR_chown 5#ifdef __NR_chown
4__NR_chown, 6__NR_chown,
@@ -20,7 +22,9 @@ __NR_chown32,
20__NR_fchown32, 22__NR_fchown32,
21__NR_lchown32, 23__NR_lchown32,
22#endif 24#endif
25#ifdef __NR_link
23__NR_link, 26__NR_link,
27#endif
24#ifdef __NR_linkat 28#ifdef __NR_linkat
25__NR_linkat, 29__NR_linkat,
26#endif 30#endif
diff --git a/include/asm-generic/audit_dir_write.h b/include/asm-generic/audit_dir_write.h
index 6621bd82cbe8..7b61db4fe72b 100644
--- a/include/asm-generic/audit_dir_write.h
+++ b/include/asm-generic/audit_dir_write.h
@@ -1,13 +1,27 @@
1#ifdef __NR_rename
1__NR_rename, 2__NR_rename,
3#endif
4#ifdef __NR_mkdir
2__NR_mkdir, 5__NR_mkdir,
6#endif
7#ifdef __NR_rmdir
3__NR_rmdir, 8__NR_rmdir,
9#endif
4#ifdef __NR_creat 10#ifdef __NR_creat
5__NR_creat, 11__NR_creat,
6#endif 12#endif
13#ifdef __NR_link
7__NR_link, 14__NR_link,
15#endif
16#ifdef __NR_unlink
8__NR_unlink, 17__NR_unlink,
18#endif
19#ifdef __NR_symlink
9__NR_symlink, 20__NR_symlink,
21#endif
22#ifdef __NR_mknod
10__NR_mknod, 23__NR_mknod,
24#endif
11#ifdef __NR_mkdirat 25#ifdef __NR_mkdirat
12__NR_mkdirat, 26__NR_mkdirat,
13__NR_mknodat, 27__NR_mknodat,
diff --git a/include/asm-generic/audit_read.h b/include/asm-generic/audit_read.h
index 0e87464d9847..3b249cb857dc 100644
--- a/include/asm-generic/audit_read.h
+++ b/include/asm-generic/audit_read.h
@@ -1,4 +1,6 @@
1#ifdef __NR_readlink
1__NR_readlink, 2__NR_readlink,
3#endif
2__NR_quotactl, 4__NR_quotactl,
3__NR_listxattr, 5__NR_listxattr,
4__NR_llistxattr, 6__NR_llistxattr,
@@ -6,3 +8,6 @@ __NR_flistxattr,
6__NR_getxattr, 8__NR_getxattr,
7__NR_lgetxattr, 9__NR_lgetxattr,
8__NR_fgetxattr, 10__NR_fgetxattr,
11#ifdef __NR_readlinkat
12__NR_readlinkat,
13#endif
diff --git a/include/asm-generic/audit_write.h b/include/asm-generic/audit_write.h
index c5f1c2c920e2..e7020c57b13b 100644
--- a/include/asm-generic/audit_write.h
+++ b/include/asm-generic/audit_write.h
@@ -4,7 +4,9 @@ __NR_acct,
4__NR_swapon, 4__NR_swapon,
5#endif 5#endif
6__NR_quotactl, 6__NR_quotactl,
7#ifdef __NR_truncate
7__NR_truncate, 8__NR_truncate,
9#endif
8#ifdef __NR_truncate64 10#ifdef __NR_truncate64
9__NR_truncate64, 11__NR_truncate64,
10#endif 12#endif
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index 57b5c3c82e86..87bc536ccde3 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -24,7 +24,10 @@
24#define flush_cache_vunmap(start, end) do { } while (0) 24#define flush_cache_vunmap(start, end) do { } while (0)
25 25
26#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 26#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
27 memcpy(dst, src, len) 27 do { \
28 memcpy(dst, src, len); \
29 flush_icache_user_range(vma, page, vaddr, len); \
30 } while (0)
28#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 31#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
29 memcpy(dst, src, len) 32 memcpy(dst, src, len)
30 33
diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
index 587566f95f6c..61fa862fe08d 100644
--- a/include/asm-generic/resource.h
+++ b/include/asm-generic/resource.h
@@ -78,7 +78,7 @@
78 [RLIMIT_CORE] = { 0, RLIM_INFINITY }, \ 78 [RLIMIT_CORE] = { 0, RLIM_INFINITY }, \
79 [RLIMIT_RSS] = { RLIM_INFINITY, RLIM_INFINITY }, \ 79 [RLIMIT_RSS] = { RLIM_INFINITY, RLIM_INFINITY }, \
80 [RLIMIT_NPROC] = { 0, 0 }, \ 80 [RLIMIT_NPROC] = { 0, 0 }, \
81 [RLIMIT_NOFILE] = { INR_OPEN, INR_OPEN }, \ 81 [RLIMIT_NOFILE] = { INR_OPEN_CUR, INR_OPEN_MAX }, \
82 [RLIMIT_MEMLOCK] = { MLOCK_LIMIT, MLOCK_LIMIT }, \ 82 [RLIMIT_MEMLOCK] = { MLOCK_LIMIT, MLOCK_LIMIT }, \
83 [RLIMIT_AS] = { RLIM_INFINITY, RLIM_INFINITY }, \ 83 [RLIMIT_AS] = { RLIM_INFINITY, RLIM_INFINITY }, \
84 [RLIMIT_LOCKS] = { RLIM_INFINITY, RLIM_INFINITY }, \ 84 [RLIMIT_LOCKS] = { RLIM_INFINITY, RLIM_INFINITY }, \
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index e43f9766259f..e58fa777fa09 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -5,6 +5,8 @@
5 * Copyright 2001 Red Hat, Inc. 5 * Copyright 2001 Red Hat, Inc.
6 * Based on code from mm/memory.c Copyright Linus Torvalds and others. 6 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
7 * 7 *
8 * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9 *
8 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 12 * as published by the Free Software Foundation; either version
@@ -17,97 +19,111 @@
17#include <asm/pgalloc.h> 19#include <asm/pgalloc.h>
18#include <asm/tlbflush.h> 20#include <asm/tlbflush.h>
19 21
22#ifdef CONFIG_HAVE_RCU_TABLE_FREE
20/* 23/*
21 * For UP we don't need to worry about TLB flush 24 * Semi RCU freeing of the page directories.
22 * and page free order so much.. 25 *
26 * This is needed by some architectures to implement software pagetable walkers.
27 *
28 * gup_fast() and other software pagetable walkers do a lockless page-table
29 * walk and therefore needs some synchronization with the freeing of the page
30 * directories. The chosen means to accomplish that is by disabling IRQs over
31 * the walk.
32 *
33 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
34 * since we unlink the page, flush TLBs, free the page. Since the disabling of
35 * IRQs delays the completion of the TLB flush we can never observe an already
36 * freed page.
37 *
38 * Architectures that do not have this (PPC) need to delay the freeing by some
39 * other means, this is that means.
40 *
41 * What we do is batch the freed directory pages (tables) and RCU free them.
42 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
43 * holds off grace periods.
44 *
45 * However, in order to batch these pages we need to allocate storage, this
46 * allocation is deep inside the MM code and can thus easily fail on memory
47 * pressure. To guarantee progress we fall back to single table freeing, see
48 * the implementation of tlb_remove_table_one().
49 *
23 */ 50 */
24#ifdef CONFIG_SMP 51struct mmu_table_batch {
25 #ifdef ARCH_FREE_PTR_NR 52 struct rcu_head rcu;
26 #define FREE_PTR_NR ARCH_FREE_PTR_NR 53 unsigned int nr;
27 #else 54 void *tables[0];
28 #define FREE_PTE_NR 506 55};
29 #endif 56
30 #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U) 57#define MAX_TABLE_BATCH \
31#else 58 ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
32 #define FREE_PTE_NR 1 59
33 #define tlb_fast_mode(tlb) 1 60extern void tlb_table_flush(struct mmu_gather *tlb);
61extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
62
34#endif 63#endif
35 64
36/* struct mmu_gather is an opaque type used by the mm code for passing around 65/*
37 * any data needed by arch specific code for tlb_remove_page. 66 * If we can't allocate a page to make a big batch of page pointers
67 * to work on, then just handle a few from the on-stack structure.
38 */ 68 */
39struct mmu_gather { 69#define MMU_GATHER_BUNDLE 8
40 struct mm_struct *mm; 70
41 unsigned int nr; /* set to ~0U means fast mode */ 71struct mmu_gather_batch {
42 unsigned int need_flush;/* Really unmapped some ptes? */ 72 struct mmu_gather_batch *next;
43 unsigned int fullmm; /* non-zero means full mm flush */ 73 unsigned int nr;
44 struct page * pages[FREE_PTE_NR]; 74 unsigned int max;
75 struct page *pages[0];
45}; 76};
46 77
47/* Users of the generic TLB shootdown code must declare this storage space. */ 78#define MAX_GATHER_BATCH \
48DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); 79 ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
49 80
50/* tlb_gather_mmu 81/* struct mmu_gather is an opaque type used by the mm code for passing around
51 * Return a pointer to an initialized struct mmu_gather. 82 * any data needed by arch specific code for tlb_remove_page.
52 */ 83 */
53static inline struct mmu_gather * 84struct mmu_gather {
54tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) 85 struct mm_struct *mm;
55{ 86#ifdef CONFIG_HAVE_RCU_TABLE_FREE
56 struct mmu_gather *tlb = &get_cpu_var(mmu_gathers); 87 struct mmu_table_batch *batch;
57 88#endif
58 tlb->mm = mm; 89 unsigned int need_flush : 1, /* Did free PTEs */
90 fast_mode : 1; /* No batching */
59 91
60 /* Use fast mode if only one CPU is online */ 92 unsigned int fullmm;
61 tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;
62 93
63 tlb->fullmm = full_mm_flush; 94 struct mmu_gather_batch *active;
95 struct mmu_gather_batch local;
96 struct page *__pages[MMU_GATHER_BUNDLE];
97};
64 98
65 return tlb; 99#define HAVE_GENERIC_MMU_GATHER
66}
67 100
68static inline void 101static inline int tlb_fast_mode(struct mmu_gather *tlb)
69tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
70{ 102{
71 if (!tlb->need_flush) 103#ifdef CONFIG_SMP
72 return; 104 return tlb->fast_mode;
73 tlb->need_flush = 0; 105#else
74 tlb_flush(tlb); 106 /*
75 if (!tlb_fast_mode(tlb)) { 107 * For UP we don't need to worry about TLB flush
76 free_pages_and_swap_cache(tlb->pages, tlb->nr); 108 * and page free order so much..
77 tlb->nr = 0; 109 */
78 } 110 return 1;
111#endif
79} 112}
80 113
81/* tlb_finish_mmu 114void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
82 * Called at the end of the shootdown operation to free up any resources 115void tlb_flush_mmu(struct mmu_gather *tlb);
83 * that were required. 116void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end);
84 */ 117int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
85static inline void
86tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
87{
88 tlb_flush_mmu(tlb, start, end);
89
90 /* keep the page table cache within bounds */
91 check_pgt_cache();
92
93 put_cpu_var(mmu_gathers);
94}
95 118
96/* tlb_remove_page 119/* tlb_remove_page
97 * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while 120 * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
98 * handling the additional races in SMP caused by other CPUs caching valid 121 * required.
99 * mappings in their TLBs.
100 */ 122 */
101static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) 123static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
102{ 124{
103 tlb->need_flush = 1; 125 if (!__tlb_remove_page(tlb, page))
104 if (tlb_fast_mode(tlb)) { 126 tlb_flush_mmu(tlb);
105 free_page_and_swap_cache(page);
106 return;
107 }
108 tlb->pages[tlb->nr++] = page;
109 if (tlb->nr >= FREE_PTE_NR)
110 tlb_flush_mmu(tlb, 0, 0);
111} 127}
112 128
113/** 129/**
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 07c40d5149de..33d524704883 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -24,16 +24,24 @@
24#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64) 24#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64)
25#endif 25#endif
26 26
27#ifdef __SYSCALL_COMPAT
28#define __SC_COMP(_nr, _sys, _comp) __SYSCALL(_nr, _comp)
29#define __SC_COMP_3264(_nr, _32, _64, _comp) __SYSCALL(_nr, _comp)
30#else
31#define __SC_COMP(_nr, _sys, _comp) __SYSCALL(_nr, _sys)
32#define __SC_COMP_3264(_nr, _32, _64, _comp) __SC_3264(_nr, _32, _64)
33#endif
34
27#define __NR_io_setup 0 35#define __NR_io_setup 0
28__SYSCALL(__NR_io_setup, sys_io_setup) 36__SC_COMP(__NR_io_setup, sys_io_setup, compat_sys_io_setup)
29#define __NR_io_destroy 1 37#define __NR_io_destroy 1
30__SYSCALL(__NR_io_destroy, sys_io_destroy) 38__SYSCALL(__NR_io_destroy, sys_io_destroy)
31#define __NR_io_submit 2 39#define __NR_io_submit 2
32__SYSCALL(__NR_io_submit, sys_io_submit) 40__SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit)
33#define __NR_io_cancel 3 41#define __NR_io_cancel 3
34__SYSCALL(__NR_io_cancel, sys_io_cancel) 42__SYSCALL(__NR_io_cancel, sys_io_cancel)
35#define __NR_io_getevents 4 43#define __NR_io_getevents 4
36__SYSCALL(__NR_io_getevents, sys_io_getevents) 44__SC_COMP(__NR_io_getevents, sys_io_getevents, compat_sys_io_getevents)
37 45
38/* fs/xattr.c */ 46/* fs/xattr.c */
39#define __NR_setxattr 5 47#define __NR_setxattr 5
@@ -67,7 +75,7 @@ __SYSCALL(__NR_getcwd, sys_getcwd)
67 75
68/* fs/cookies.c */ 76/* fs/cookies.c */
69#define __NR_lookup_dcookie 18 77#define __NR_lookup_dcookie 18
70__SYSCALL(__NR_lookup_dcookie, sys_lookup_dcookie) 78__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie)
71 79
72/* fs/eventfd.c */ 80/* fs/eventfd.c */
73#define __NR_eventfd2 19 81#define __NR_eventfd2 19
@@ -79,7 +87,7 @@ __SYSCALL(__NR_epoll_create1, sys_epoll_create1)
79#define __NR_epoll_ctl 21 87#define __NR_epoll_ctl 21
80__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl) 88__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
81#define __NR_epoll_pwait 22 89#define __NR_epoll_pwait 22
82__SYSCALL(__NR_epoll_pwait, sys_epoll_pwait) 90__SC_COMP(__NR_epoll_pwait, sys_epoll_pwait, compat_sys_epoll_pwait)
83 91
84/* fs/fcntl.c */ 92/* fs/fcntl.c */
85#define __NR_dup 23 93#define __NR_dup 23
@@ -87,7 +95,7 @@ __SYSCALL(__NR_dup, sys_dup)
87#define __NR_dup3 24 95#define __NR_dup3 24
88__SYSCALL(__NR_dup3, sys_dup3) 96__SYSCALL(__NR_dup3, sys_dup3)
89#define __NR3264_fcntl 25 97#define __NR3264_fcntl 25
90__SC_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl) 98__SC_COMP_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl, compat_sys_fcntl64)
91 99
92/* fs/inotify_user.c */ 100/* fs/inotify_user.c */
93#define __NR_inotify_init1 26 101#define __NR_inotify_init1 26
@@ -99,7 +107,7 @@ __SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
99 107
100/* fs/ioctl.c */ 108/* fs/ioctl.c */
101#define __NR_ioctl 29 109#define __NR_ioctl 29
102__SYSCALL(__NR_ioctl, sys_ioctl) 110__SC_COMP(__NR_ioctl, sys_ioctl, compat_sys_ioctl)
103 111
104/* fs/ioprio.c */ 112/* fs/ioprio.c */
105#define __NR_ioprio_set 30 113#define __NR_ioprio_set 30
@@ -129,26 +137,30 @@ __SYSCALL(__NR_renameat, sys_renameat)
129#define __NR_umount2 39 137#define __NR_umount2 39
130__SYSCALL(__NR_umount2, sys_umount) 138__SYSCALL(__NR_umount2, sys_umount)
131#define __NR_mount 40 139#define __NR_mount 40
132__SYSCALL(__NR_mount, sys_mount) 140__SC_COMP(__NR_mount, sys_mount, compat_sys_mount)
133#define __NR_pivot_root 41 141#define __NR_pivot_root 41
134__SYSCALL(__NR_pivot_root, sys_pivot_root) 142__SYSCALL(__NR_pivot_root, sys_pivot_root)
135 143
136/* fs/nfsctl.c */ 144/* fs/nfsctl.c */
137#define __NR_nfsservctl 42 145#define __NR_nfsservctl 42
138__SYSCALL(__NR_nfsservctl, sys_nfsservctl) 146__SC_COMP(__NR_nfsservctl, sys_nfsservctl, compat_sys_nfsservctl)
139 147
140/* fs/open.c */ 148/* fs/open.c */
141#define __NR3264_statfs 43 149#define __NR3264_statfs 43
142__SC_3264(__NR3264_statfs, sys_statfs64, sys_statfs) 150__SC_COMP_3264(__NR3264_statfs, sys_statfs64, sys_statfs, \
151 compat_sys_statfs64)
143#define __NR3264_fstatfs 44 152#define __NR3264_fstatfs 44
144__SC_3264(__NR3264_fstatfs, sys_fstatfs64, sys_fstatfs) 153__SC_COMP_3264(__NR3264_fstatfs, sys_fstatfs64, sys_fstatfs, \
154 compat_sys_fstatfs64)
145#define __NR3264_truncate 45 155#define __NR3264_truncate 45
146__SC_3264(__NR3264_truncate, sys_truncate64, sys_truncate) 156__SC_COMP_3264(__NR3264_truncate, sys_truncate64, sys_truncate, \
157 compat_sys_truncate64)
147#define __NR3264_ftruncate 46 158#define __NR3264_ftruncate 46
148__SC_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate) 159__SC_COMP_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate, \
160 compat_sys_ftruncate64)
149 161
150#define __NR_fallocate 47 162#define __NR_fallocate 47
151__SYSCALL(__NR_fallocate, sys_fallocate) 163__SC_COMP(__NR_fallocate, sys_fallocate, compat_sys_fallocate)
152#define __NR_faccessat 48 164#define __NR_faccessat 48
153__SYSCALL(__NR_faccessat, sys_faccessat) 165__SYSCALL(__NR_faccessat, sys_faccessat)
154#define __NR_chdir 49 166#define __NR_chdir 49
@@ -166,7 +178,7 @@ __SYSCALL(__NR_fchownat, sys_fchownat)
166#define __NR_fchown 55 178#define __NR_fchown 55
167__SYSCALL(__NR_fchown, sys_fchown) 179__SYSCALL(__NR_fchown, sys_fchown)
168#define __NR_openat 56 180#define __NR_openat 56
169__SYSCALL(__NR_openat, sys_openat) 181__SC_COMP(__NR_openat, sys_openat, compat_sys_openat)
170#define __NR_close 57 182#define __NR_close 57
171__SYSCALL(__NR_close, sys_close) 183__SYSCALL(__NR_close, sys_close)
172#define __NR_vhangup 58 184#define __NR_vhangup 58
@@ -182,7 +194,7 @@ __SYSCALL(__NR_quotactl, sys_quotactl)
182 194
183/* fs/readdir.c */ 195/* fs/readdir.c */
184#define __NR_getdents64 61 196#define __NR_getdents64 61
185__SYSCALL(__NR_getdents64, sys_getdents64) 197__SC_COMP(__NR_getdents64, sys_getdents64, compat_sys_getdents64)
186 198
187/* fs/read_write.c */ 199/* fs/read_write.c */
188#define __NR3264_lseek 62 200#define __NR3264_lseek 62
@@ -192,17 +204,17 @@ __SYSCALL(__NR_read, sys_read)
192#define __NR_write 64 204#define __NR_write 64
193__SYSCALL(__NR_write, sys_write) 205__SYSCALL(__NR_write, sys_write)
194#define __NR_readv 65 206#define __NR_readv 65
195__SYSCALL(__NR_readv, sys_readv) 207__SC_COMP(__NR_readv, sys_readv, compat_sys_readv)
196#define __NR_writev 66 208#define __NR_writev 66
197__SYSCALL(__NR_writev, sys_writev) 209__SC_COMP(__NR_writev, sys_writev, compat_sys_writev)
198#define __NR_pread64 67 210#define __NR_pread64 67
199__SYSCALL(__NR_pread64, sys_pread64) 211__SC_COMP(__NR_pread64, sys_pread64, compat_sys_pread64)
200#define __NR_pwrite64 68 212#define __NR_pwrite64 68
201__SYSCALL(__NR_pwrite64, sys_pwrite64) 213__SC_COMP(__NR_pwrite64, sys_pwrite64, compat_sys_pwrite64)
202#define __NR_preadv 69 214#define __NR_preadv 69
203__SYSCALL(__NR_preadv, sys_preadv) 215__SC_COMP(__NR_preadv, sys_preadv, compat_sys_preadv)
204#define __NR_pwritev 70 216#define __NR_pwritev 70
205__SYSCALL(__NR_pwritev, sys_pwritev) 217__SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
206 218
207/* fs/sendfile.c */ 219/* fs/sendfile.c */
208#define __NR3264_sendfile 71 220#define __NR3264_sendfile 71
@@ -210,17 +222,17 @@ __SC_3264(__NR3264_sendfile, sys_sendfile64, sys_sendfile)
210 222
211/* fs/select.c */ 223/* fs/select.c */
212#define __NR_pselect6 72 224#define __NR_pselect6 72
213__SYSCALL(__NR_pselect6, sys_pselect6) 225__SC_COMP(__NR_pselect6, sys_pselect6, compat_sys_pselect6)
214#define __NR_ppoll 73 226#define __NR_ppoll 73
215__SYSCALL(__NR_ppoll, sys_ppoll) 227__SC_COMP(__NR_ppoll, sys_ppoll, compat_sys_ppoll)
216 228
217/* fs/signalfd.c */ 229/* fs/signalfd.c */
218#define __NR_signalfd4 74 230#define __NR_signalfd4 74
219__SYSCALL(__NR_signalfd4, sys_signalfd4) 231__SC_COMP(__NR_signalfd4, sys_signalfd4, compat_sys_signalfd4)
220 232
221/* fs/splice.c */ 233/* fs/splice.c */
222#define __NR_vmsplice 75 234#define __NR_vmsplice 75
223__SYSCALL(__NR_vmsplice, sys_vmsplice) 235__SC_COMP(__NR_vmsplice, sys_vmsplice, compat_sys_vmsplice)
224#define __NR_splice 76 236#define __NR_splice 76
225__SYSCALL(__NR_splice, sys_splice) 237__SYSCALL(__NR_splice, sys_splice)
226#define __NR_tee 77 238#define __NR_tee 77
@@ -243,23 +255,27 @@ __SYSCALL(__NR_fsync, sys_fsync)
243__SYSCALL(__NR_fdatasync, sys_fdatasync) 255__SYSCALL(__NR_fdatasync, sys_fdatasync)
244#ifdef __ARCH_WANT_SYNC_FILE_RANGE2 256#ifdef __ARCH_WANT_SYNC_FILE_RANGE2
245#define __NR_sync_file_range2 84 257#define __NR_sync_file_range2 84
246__SYSCALL(__NR_sync_file_range2, sys_sync_file_range2) 258__SC_COMP(__NR_sync_file_range2, sys_sync_file_range2, \
259 compat_sys_sync_file_range2)
247#else 260#else
248#define __NR_sync_file_range 84 261#define __NR_sync_file_range 84
249__SYSCALL(__NR_sync_file_range, sys_sync_file_range) 262__SC_COMP(__NR_sync_file_range, sys_sync_file_range, \
263 compat_sys_sync_file_range)
250#endif 264#endif
251 265
252/* fs/timerfd.c */ 266/* fs/timerfd.c */
253#define __NR_timerfd_create 85 267#define __NR_timerfd_create 85
254__SYSCALL(__NR_timerfd_create, sys_timerfd_create) 268__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
255#define __NR_timerfd_settime 86 269#define __NR_timerfd_settime 86
256__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime) 270__SC_COMP(__NR_timerfd_settime, sys_timerfd_settime, \
271 compat_sys_timerfd_settime)
257#define __NR_timerfd_gettime 87 272#define __NR_timerfd_gettime 87
258__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime) 273__SC_COMP(__NR_timerfd_gettime, sys_timerfd_gettime, \
274 compat_sys_timerfd_gettime)
259 275
260/* fs/utimes.c */ 276/* fs/utimes.c */
261#define __NR_utimensat 88 277#define __NR_utimensat 88
262__SYSCALL(__NR_utimensat, sys_utimensat) 278__SC_COMP(__NR_utimensat, sys_utimensat, compat_sys_utimensat)
263 279
264/* kernel/acct.c */ 280/* kernel/acct.c */
265#define __NR_acct 89 281#define __NR_acct 89
@@ -281,7 +297,7 @@ __SYSCALL(__NR_exit, sys_exit)
281#define __NR_exit_group 94 297#define __NR_exit_group 94
282__SYSCALL(__NR_exit_group, sys_exit_group) 298__SYSCALL(__NR_exit_group, sys_exit_group)
283#define __NR_waitid 95 299#define __NR_waitid 95
284__SYSCALL(__NR_waitid, sys_waitid) 300__SC_COMP(__NR_waitid, sys_waitid, compat_sys_waitid)
285 301
286/* kernel/fork.c */ 302/* kernel/fork.c */
287#define __NR_set_tid_address 96 303#define __NR_set_tid_address 96
@@ -291,25 +307,27 @@ __SYSCALL(__NR_unshare, sys_unshare)
291 307
292/* kernel/futex.c */ 308/* kernel/futex.c */
293#define __NR_futex 98 309#define __NR_futex 98
294__SYSCALL(__NR_futex, sys_futex) 310__SC_COMP(__NR_futex, sys_futex, compat_sys_futex)
295#define __NR_set_robust_list 99 311#define __NR_set_robust_list 99
296__SYSCALL(__NR_set_robust_list, sys_set_robust_list) 312__SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
313 compat_sys_set_robust_list)
297#define __NR_get_robust_list 100 314#define __NR_get_robust_list 100
298__SYSCALL(__NR_get_robust_list, sys_get_robust_list) 315__SC_COMP(__NR_get_robust_list, sys_get_robust_list, \
316 compat_sys_get_robust_list)
299 317
300/* kernel/hrtimer.c */ 318/* kernel/hrtimer.c */
301#define __NR_nanosleep 101 319#define __NR_nanosleep 101
302__SYSCALL(__NR_nanosleep, sys_nanosleep) 320__SC_COMP(__NR_nanosleep, sys_nanosleep, compat_sys_nanosleep)
303 321
304/* kernel/itimer.c */ 322/* kernel/itimer.c */
305#define __NR_getitimer 102 323#define __NR_getitimer 102
306__SYSCALL(__NR_getitimer, sys_getitimer) 324__SC_COMP(__NR_getitimer, sys_getitimer, compat_sys_getitimer)
307#define __NR_setitimer 103 325#define __NR_setitimer 103
308__SYSCALL(__NR_setitimer, sys_setitimer) 326__SC_COMP(__NR_setitimer, sys_setitimer, compat_sys_setitimer)
309 327
310/* kernel/kexec.c */ 328/* kernel/kexec.c */
311#define __NR_kexec_load 104 329#define __NR_kexec_load 104
312__SYSCALL(__NR_kexec_load, sys_kexec_load) 330__SC_COMP(__NR_kexec_load, sys_kexec_load, compat_sys_kexec_load)
313 331
314/* kernel/module.c */ 332/* kernel/module.c */
315#define __NR_init_module 105 333#define __NR_init_module 105
@@ -319,23 +337,24 @@ __SYSCALL(__NR_delete_module, sys_delete_module)
319 337
320/* kernel/posix-timers.c */ 338/* kernel/posix-timers.c */
321#define __NR_timer_create 107 339#define __NR_timer_create 107
322__SYSCALL(__NR_timer_create, sys_timer_create) 340__SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create)
323#define __NR_timer_gettime 108 341#define __NR_timer_gettime 108
324__SYSCALL(__NR_timer_gettime, sys_timer_gettime) 342__SC_COMP(__NR_timer_gettime, sys_timer_gettime, compat_sys_timer_gettime)
325#define __NR_timer_getoverrun 109 343#define __NR_timer_getoverrun 109
326__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun) 344__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
327#define __NR_timer_settime 110 345#define __NR_timer_settime 110
328__SYSCALL(__NR_timer_settime, sys_timer_settime) 346__SC_COMP(__NR_timer_settime, sys_timer_settime, compat_sys_timer_settime)
329#define __NR_timer_delete 111 347#define __NR_timer_delete 111
330__SYSCALL(__NR_timer_delete, sys_timer_delete) 348__SYSCALL(__NR_timer_delete, sys_timer_delete)
331#define __NR_clock_settime 112 349#define __NR_clock_settime 112
332__SYSCALL(__NR_clock_settime, sys_clock_settime) 350__SC_COMP(__NR_clock_settime, sys_clock_settime, compat_sys_clock_settime)
333#define __NR_clock_gettime 113 351#define __NR_clock_gettime 113
334__SYSCALL(__NR_clock_gettime, sys_clock_gettime) 352__SC_COMP(__NR_clock_gettime, sys_clock_gettime, compat_sys_clock_gettime)
335#define __NR_clock_getres 114 353#define __NR_clock_getres 114
336__SYSCALL(__NR_clock_getres, sys_clock_getres) 354__SC_COMP(__NR_clock_getres, sys_clock_getres, compat_sys_clock_getres)
337#define __NR_clock_nanosleep 115 355#define __NR_clock_nanosleep 115
338__SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep) 356__SC_COMP(__NR_clock_nanosleep, sys_clock_nanosleep, \
357 compat_sys_clock_nanosleep)
339 358
340/* kernel/printk.c */ 359/* kernel/printk.c */
341#define __NR_syslog 116 360#define __NR_syslog 116
@@ -355,9 +374,11 @@ __SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
355#define __NR_sched_getparam 121 374#define __NR_sched_getparam 121
356__SYSCALL(__NR_sched_getparam, sys_sched_getparam) 375__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
357#define __NR_sched_setaffinity 122 376#define __NR_sched_setaffinity 122
358__SYSCALL(__NR_sched_setaffinity, sys_sched_setaffinity) 377__SC_COMP(__NR_sched_setaffinity, sys_sched_setaffinity, \
378 compat_sys_sched_setaffinity)
359#define __NR_sched_getaffinity 123 379#define __NR_sched_getaffinity 123
360__SYSCALL(__NR_sched_getaffinity, sys_sched_getaffinity) 380__SC_COMP(__NR_sched_getaffinity, sys_sched_getaffinity, \
381 compat_sys_sched_getaffinity)
361#define __NR_sched_yield 124 382#define __NR_sched_yield 124
362__SYSCALL(__NR_sched_yield, sys_sched_yield) 383__SYSCALL(__NR_sched_yield, sys_sched_yield)
363#define __NR_sched_get_priority_max 125 384#define __NR_sched_get_priority_max 125
@@ -365,7 +386,8 @@ __SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
365#define __NR_sched_get_priority_min 126 386#define __NR_sched_get_priority_min 126
366__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min) 387__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
367#define __NR_sched_rr_get_interval 127 388#define __NR_sched_rr_get_interval 127
368__SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval) 389__SC_COMP(__NR_sched_rr_get_interval, sys_sched_rr_get_interval, \
390 compat_sys_sched_rr_get_interval)
369 391
370/* kernel/signal.c */ 392/* kernel/signal.c */
371#define __NR_restart_syscall 128 393#define __NR_restart_syscall 128
@@ -377,21 +399,23 @@ __SYSCALL(__NR_tkill, sys_tkill)
377#define __NR_tgkill 131 399#define __NR_tgkill 131
378__SYSCALL(__NR_tgkill, sys_tgkill) 400__SYSCALL(__NR_tgkill, sys_tgkill)
379#define __NR_sigaltstack 132 401#define __NR_sigaltstack 132
380__SYSCALL(__NR_sigaltstack, sys_sigaltstack) 402__SC_COMP(__NR_sigaltstack, sys_sigaltstack, compat_sys_sigaltstack)
381#define __NR_rt_sigsuspend 133 403#define __NR_rt_sigsuspend 133
382__SYSCALL(__NR_rt_sigsuspend, sys_rt_sigsuspend) /* __ARCH_WANT_SYS_RT_SIGSUSPEND */ 404__SC_COMP(__NR_rt_sigsuspend, sys_rt_sigsuspend, compat_sys_rt_sigsuspend)
383#define __NR_rt_sigaction 134 405#define __NR_rt_sigaction 134
384__SYSCALL(__NR_rt_sigaction, sys_rt_sigaction) /* __ARCH_WANT_SYS_RT_SIGACTION */ 406__SC_COMP(__NR_rt_sigaction, sys_rt_sigaction, compat_sys_rt_sigaction)
385#define __NR_rt_sigprocmask 135 407#define __NR_rt_sigprocmask 135
386__SYSCALL(__NR_rt_sigprocmask, sys_rt_sigprocmask) 408__SYSCALL(__NR_rt_sigprocmask, sys_rt_sigprocmask)
387#define __NR_rt_sigpending 136 409#define __NR_rt_sigpending 136
388__SYSCALL(__NR_rt_sigpending, sys_rt_sigpending) 410__SYSCALL(__NR_rt_sigpending, sys_rt_sigpending)
389#define __NR_rt_sigtimedwait 137 411#define __NR_rt_sigtimedwait 137
390__SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait) 412__SC_COMP(__NR_rt_sigtimedwait, sys_rt_sigtimedwait, \
413 compat_sys_rt_sigtimedwait)
391#define __NR_rt_sigqueueinfo 138 414#define __NR_rt_sigqueueinfo 138
392__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo) 415__SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \
416 compat_sys_rt_sigqueueinfo)
393#define __NR_rt_sigreturn 139 417#define __NR_rt_sigreturn 139
394__SYSCALL(__NR_rt_sigreturn, sys_rt_sigreturn) /* sys_rt_sigreturn_wrapper, */ 418__SC_COMP(__NR_rt_sigreturn, sys_rt_sigreturn, compat_sys_rt_sigreturn)
395 419
396/* kernel/sys.c */ 420/* kernel/sys.c */
397#define __NR_setpriority 140 421#define __NR_setpriority 140
@@ -421,7 +445,7 @@ __SYSCALL(__NR_setfsuid, sys_setfsuid)
421#define __NR_setfsgid 152 445#define __NR_setfsgid 152
422__SYSCALL(__NR_setfsgid, sys_setfsgid) 446__SYSCALL(__NR_setfsgid, sys_setfsgid)
423#define __NR_times 153 447#define __NR_times 153
424__SYSCALL(__NR_times, sys_times) 448__SC_COMP(__NR_times, sys_times, compat_sys_times)
425#define __NR_setpgid 154 449#define __NR_setpgid 154
426__SYSCALL(__NR_setpgid, sys_setpgid) 450__SYSCALL(__NR_setpgid, sys_setpgid)
427#define __NR_getpgid 155 451#define __NR_getpgid 155
@@ -441,11 +465,11 @@ __SYSCALL(__NR_sethostname, sys_sethostname)
441#define __NR_setdomainname 162 465#define __NR_setdomainname 162
442__SYSCALL(__NR_setdomainname, sys_setdomainname) 466__SYSCALL(__NR_setdomainname, sys_setdomainname)
443#define __NR_getrlimit 163 467#define __NR_getrlimit 163
444__SYSCALL(__NR_getrlimit, sys_getrlimit) 468__SC_COMP(__NR_getrlimit, sys_getrlimit, compat_sys_getrlimit)
445#define __NR_setrlimit 164 469#define __NR_setrlimit 164
446__SYSCALL(__NR_setrlimit, sys_setrlimit) 470__SC_COMP(__NR_setrlimit, sys_setrlimit, compat_sys_setrlimit)
447#define __NR_getrusage 165 471#define __NR_getrusage 165
448__SYSCALL(__NR_getrusage, sys_getrusage) 472__SC_COMP(__NR_getrusage, sys_getrusage, compat_sys_getrusage)
449#define __NR_umask 166 473#define __NR_umask 166
450__SYSCALL(__NR_umask, sys_umask) 474__SYSCALL(__NR_umask, sys_umask)
451#define __NR_prctl 167 475#define __NR_prctl 167
@@ -455,11 +479,11 @@ __SYSCALL(__NR_getcpu, sys_getcpu)
455 479
456/* kernel/time.c */ 480/* kernel/time.c */
457#define __NR_gettimeofday 169 481#define __NR_gettimeofday 169
458__SYSCALL(__NR_gettimeofday, sys_gettimeofday) 482__SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday)
459#define __NR_settimeofday 170 483#define __NR_settimeofday 170
460__SYSCALL(__NR_settimeofday, sys_settimeofday) 484__SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
461#define __NR_adjtimex 171 485#define __NR_adjtimex 171
462__SYSCALL(__NR_adjtimex, sys_adjtimex) 486__SC_COMP(__NR_adjtimex, sys_adjtimex, compat_sys_adjtimex)
463 487
464/* kernel/timer.c */ 488/* kernel/timer.c */
465#define __NR_getpid 172 489#define __NR_getpid 172
@@ -477,39 +501,40 @@ __SYSCALL(__NR_getegid, sys_getegid)
477#define __NR_gettid 178 501#define __NR_gettid 178
478__SYSCALL(__NR_gettid, sys_gettid) 502__SYSCALL(__NR_gettid, sys_gettid)
479#define __NR_sysinfo 179 503#define __NR_sysinfo 179
480__SYSCALL(__NR_sysinfo, sys_sysinfo) 504__SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo)
481 505
482/* ipc/mqueue.c */ 506/* ipc/mqueue.c */
483#define __NR_mq_open 180 507#define __NR_mq_open 180
484__SYSCALL(__NR_mq_open, sys_mq_open) 508__SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open)
485#define __NR_mq_unlink 181 509#define __NR_mq_unlink 181
486__SYSCALL(__NR_mq_unlink, sys_mq_unlink) 510__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
487#define __NR_mq_timedsend 182 511#define __NR_mq_timedsend 182
488__SYSCALL(__NR_mq_timedsend, sys_mq_timedsend) 512__SC_COMP(__NR_mq_timedsend, sys_mq_timedsend, compat_sys_mq_timedsend)
489#define __NR_mq_timedreceive 183 513#define __NR_mq_timedreceive 183
490__SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive) 514__SC_COMP(__NR_mq_timedreceive, sys_mq_timedreceive, \
515 compat_sys_mq_timedreceive)
491#define __NR_mq_notify 184 516#define __NR_mq_notify 184
492__SYSCALL(__NR_mq_notify, sys_mq_notify) 517__SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify)
493#define __NR_mq_getsetattr 185 518#define __NR_mq_getsetattr 185
494__SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr) 519__SC_COMP(__NR_mq_getsetattr, sys_mq_getsetattr, compat_sys_mq_getsetattr)
495 520
496/* ipc/msg.c */ 521/* ipc/msg.c */
497#define __NR_msgget 186 522#define __NR_msgget 186
498__SYSCALL(__NR_msgget, sys_msgget) 523__SYSCALL(__NR_msgget, sys_msgget)
499#define __NR_msgctl 187 524#define __NR_msgctl 187
500__SYSCALL(__NR_msgctl, sys_msgctl) 525__SC_COMP(__NR_msgctl, sys_msgctl, compat_sys_msgctl)
501#define __NR_msgrcv 188 526#define __NR_msgrcv 188
502__SYSCALL(__NR_msgrcv, sys_msgrcv) 527__SC_COMP(__NR_msgrcv, sys_msgrcv, compat_sys_msgrcv)
503#define __NR_msgsnd 189 528#define __NR_msgsnd 189
504__SYSCALL(__NR_msgsnd, sys_msgsnd) 529__SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd)
505 530
506/* ipc/sem.c */ 531/* ipc/sem.c */
507#define __NR_semget 190 532#define __NR_semget 190
508__SYSCALL(__NR_semget, sys_semget) 533__SYSCALL(__NR_semget, sys_semget)
509#define __NR_semctl 191 534#define __NR_semctl 191
510__SYSCALL(__NR_semctl, sys_semctl) 535__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
511#define __NR_semtimedop 192 536#define __NR_semtimedop 192
512__SYSCALL(__NR_semtimedop, sys_semtimedop) 537__SC_COMP(__NR_semtimedop, sys_semtimedop, compat_sys_semtimedop)
513#define __NR_semop 193 538#define __NR_semop 193
514__SYSCALL(__NR_semop, sys_semop) 539__SYSCALL(__NR_semop, sys_semop)
515 540
@@ -517,9 +542,9 @@ __SYSCALL(__NR_semop, sys_semop)
517#define __NR_shmget 194 542#define __NR_shmget 194
518__SYSCALL(__NR_shmget, sys_shmget) 543__SYSCALL(__NR_shmget, sys_shmget)
519#define __NR_shmctl 195 544#define __NR_shmctl 195
520__SYSCALL(__NR_shmctl, sys_shmctl) 545__SC_COMP(__NR_shmctl, sys_shmctl, compat_sys_shmctl)
521#define __NR_shmat 196 546#define __NR_shmat 196
522__SYSCALL(__NR_shmat, sys_shmat) 547__SC_COMP(__NR_shmat, sys_shmat, compat_sys_shmat)
523#define __NR_shmdt 197 548#define __NR_shmdt 197
524__SYSCALL(__NR_shmdt, sys_shmdt) 549__SYSCALL(__NR_shmdt, sys_shmdt)
525 550
@@ -543,21 +568,21 @@ __SYSCALL(__NR_getpeername, sys_getpeername)
543#define __NR_sendto 206 568#define __NR_sendto 206
544__SYSCALL(__NR_sendto, sys_sendto) 569__SYSCALL(__NR_sendto, sys_sendto)
545#define __NR_recvfrom 207 570#define __NR_recvfrom 207
546__SYSCALL(__NR_recvfrom, sys_recvfrom) 571__SC_COMP(__NR_recvfrom, sys_recvfrom, compat_sys_recvfrom)
547#define __NR_setsockopt 208 572#define __NR_setsockopt 208
548__SYSCALL(__NR_setsockopt, sys_setsockopt) 573__SC_COMP(__NR_setsockopt, sys_setsockopt, compat_sys_setsockopt)
549#define __NR_getsockopt 209 574#define __NR_getsockopt 209
550__SYSCALL(__NR_getsockopt, sys_getsockopt) 575__SC_COMP(__NR_getsockopt, sys_getsockopt, compat_sys_getsockopt)
551#define __NR_shutdown 210 576#define __NR_shutdown 210
552__SYSCALL(__NR_shutdown, sys_shutdown) 577__SYSCALL(__NR_shutdown, sys_shutdown)
553#define __NR_sendmsg 211 578#define __NR_sendmsg 211
554__SYSCALL(__NR_sendmsg, sys_sendmsg) 579__SC_COMP(__NR_sendmsg, sys_sendmsg, compat_sys_sendmsg)
555#define __NR_recvmsg 212 580#define __NR_recvmsg 212
556__SYSCALL(__NR_recvmsg, sys_recvmsg) 581__SC_COMP(__NR_recvmsg, sys_recvmsg, compat_sys_recvmsg)
557 582
558/* mm/filemap.c */ 583/* mm/filemap.c */
559#define __NR_readahead 213 584#define __NR_readahead 213
560__SYSCALL(__NR_readahead, sys_readahead) 585__SC_COMP(__NR_readahead, sys_readahead, compat_sys_readahead)
561 586
562/* mm/nommu.c, also with MMU */ 587/* mm/nommu.c, also with MMU */
563#define __NR_brk 214 588#define __NR_brk 214
@@ -573,19 +598,19 @@ __SYSCALL(__NR_add_key, sys_add_key)
573#define __NR_request_key 218 598#define __NR_request_key 218
574__SYSCALL(__NR_request_key, sys_request_key) 599__SYSCALL(__NR_request_key, sys_request_key)
575#define __NR_keyctl 219 600#define __NR_keyctl 219
576__SYSCALL(__NR_keyctl, sys_keyctl) 601__SC_COMP(__NR_keyctl, sys_keyctl, compat_sys_keyctl)
577 602
578/* arch/example/kernel/sys_example.c */ 603/* arch/example/kernel/sys_example.c */
579#define __NR_clone 220 604#define __NR_clone 220
580__SYSCALL(__NR_clone, sys_clone) /* .long sys_clone_wrapper */ 605__SYSCALL(__NR_clone, sys_clone)
581#define __NR_execve 221 606#define __NR_execve 221
582__SYSCALL(__NR_execve, sys_execve) /* .long sys_execve_wrapper */ 607__SC_COMP(__NR_execve, sys_execve, compat_sys_execve)
583 608
584#define __NR3264_mmap 222 609#define __NR3264_mmap 222
585__SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap) 610__SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap)
586/* mm/fadvise.c */ 611/* mm/fadvise.c */
587#define __NR3264_fadvise64 223 612#define __NR3264_fadvise64 223
588__SYSCALL(__NR3264_fadvise64, sys_fadvise64_64) 613__SC_COMP(__NR3264_fadvise64, sys_fadvise64_64, compat_sys_fadvise64_64)
589 614
590/* mm/, CONFIG_MMU only */ 615/* mm/, CONFIG_MMU only */
591#ifndef __ARCH_NOMMU 616#ifndef __ARCH_NOMMU
@@ -612,25 +637,26 @@ __SYSCALL(__NR_madvise, sys_madvise)
612#define __NR_remap_file_pages 234 637#define __NR_remap_file_pages 234
613__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages) 638__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
614#define __NR_mbind 235 639#define __NR_mbind 235
615__SYSCALL(__NR_mbind, sys_mbind) 640__SC_COMP(__NR_mbind, sys_mbind, compat_sys_mbind)
616#define __NR_get_mempolicy 236 641#define __NR_get_mempolicy 236
617__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy) 642__SC_COMP(__NR_get_mempolicy, sys_get_mempolicy, compat_sys_get_mempolicy)
618#define __NR_set_mempolicy 237 643#define __NR_set_mempolicy 237
619__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy) 644__SC_COMP(__NR_set_mempolicy, sys_set_mempolicy, compat_sys_set_mempolicy)
620#define __NR_migrate_pages 238 645#define __NR_migrate_pages 238
621__SYSCALL(__NR_migrate_pages, sys_migrate_pages) 646__SC_COMP(__NR_migrate_pages, sys_migrate_pages, compat_sys_migrate_pages)
622#define __NR_move_pages 239 647#define __NR_move_pages 239
623__SYSCALL(__NR_move_pages, sys_move_pages) 648__SC_COMP(__NR_move_pages, sys_move_pages, compat_sys_move_pages)
624#endif 649#endif
625 650
626#define __NR_rt_tgsigqueueinfo 240 651#define __NR_rt_tgsigqueueinfo 240
627__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) 652__SC_COMP(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, \
653 compat_sys_rt_tgsigqueueinfo)
628#define __NR_perf_event_open 241 654#define __NR_perf_event_open 241
629__SYSCALL(__NR_perf_event_open, sys_perf_event_open) 655__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
630#define __NR_accept4 242 656#define __NR_accept4 242
631__SYSCALL(__NR_accept4, sys_accept4) 657__SYSCALL(__NR_accept4, sys_accept4)
632#define __NR_recvmmsg 243 658#define __NR_recvmmsg 243
633__SYSCALL(__NR_recvmmsg, sys_recvmmsg) 659__SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg)
634 660
635/* 661/*
636 * Architectures may provide up to 16 syscalls of their own 662 * Architectures may provide up to 16 syscalls of their own
@@ -639,19 +665,20 @@ __SYSCALL(__NR_recvmmsg, sys_recvmmsg)
639#define __NR_arch_specific_syscall 244 665#define __NR_arch_specific_syscall 244
640 666
641#define __NR_wait4 260 667#define __NR_wait4 260
642__SYSCALL(__NR_wait4, sys_wait4) 668__SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4)
643#define __NR_prlimit64 261 669#define __NR_prlimit64 261
644__SYSCALL(__NR_prlimit64, sys_prlimit64) 670__SYSCALL(__NR_prlimit64, sys_prlimit64)
645#define __NR_fanotify_init 262 671#define __NR_fanotify_init 262
646__SYSCALL(__NR_fanotify_init, sys_fanotify_init) 672__SYSCALL(__NR_fanotify_init, sys_fanotify_init)
647#define __NR_fanotify_mark 263 673#define __NR_fanotify_mark 263
648__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark) 674__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
649#define __NR_name_to_handle_at 264 675#define __NR_name_to_handle_at 264
650__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at) 676__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
651#define __NR_open_by_handle_at 265 677#define __NR_open_by_handle_at 265
652__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at) 678__SC_COMP(__NR_open_by_handle_at, sys_open_by_handle_at, \
679 compat_sys_open_by_handle_at)
653#define __NR_clock_adjtime 266 680#define __NR_clock_adjtime 266
654__SYSCALL(__NR_clock_adjtime, sys_clock_adjtime) 681__SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime)
655#define __NR_syncfs 267 682#define __NR_syncfs 267
656__SYSCALL(__NR_syncfs, sys_syncfs) 683__SYSCALL(__NR_syncfs, sys_syncfs)
657 684
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index cb1ded2bd545..ccfedb4f3eb0 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -302,6 +302,7 @@ header-y += ppp-comp.h
302header-y += ppp_defs.h 302header-y += ppp_defs.h
303header-y += pps.h 303header-y += pps.h
304header-y += prctl.h 304header-y += prctl.h
305header-y += ptp_clock.h
305header-y += ptrace.h 306header-y += ptrace.h
306header-y += qnx4_fs.h 307header-y += qnx4_fs.h
307header-y += qnxtypes.h 308header-y += qnxtypes.h
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index daf8c480c786..dcafe0bf0005 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -55,7 +55,8 @@
55 * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf 55 * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
56 * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf 56 * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
57 * bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf 57 * bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf
58 * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from list 58 * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf
59 * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf
59 * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region 60 * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
60 * bitmap_release_region(bitmap, pos, order) Free specified bit region 61 * bitmap_release_region(bitmap, pos, order) Free specified bit region
61 * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region 62 * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
@@ -129,6 +130,8 @@ extern int bitmap_scnlistprintf(char *buf, unsigned int len,
129 const unsigned long *src, int nbits); 130 const unsigned long *src, int nbits);
130extern int bitmap_parselist(const char *buf, unsigned long *maskp, 131extern int bitmap_parselist(const char *buf, unsigned long *maskp,
131 int nmaskbits); 132 int nmaskbits);
133extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
134 unsigned long *dst, int nbits);
132extern void bitmap_remap(unsigned long *dst, const unsigned long *src, 135extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
133 const unsigned long *old, const unsigned long *new, int bits); 136 const unsigned long *old, const unsigned long *new, int bits);
134extern int bitmap_bitremap(int oldbit, 137extern int bitmap_bitremap(int oldbit,
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index be50d9e70a7d..2a7cea53ca0d 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -151,7 +151,6 @@ enum rq_flag_bits {
151 __REQ_IO_STAT, /* account I/O stat */ 151 __REQ_IO_STAT, /* account I/O stat */
152 __REQ_MIXED_MERGE, /* merge of different types, fail separately */ 152 __REQ_MIXED_MERGE, /* merge of different types, fail separately */
153 __REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */ 153 __REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */
154 __REQ_ON_PLUG, /* on plug list */
155 __REQ_NR_BITS, /* stops here */ 154 __REQ_NR_BITS, /* stops here */
156}; 155};
157 156
@@ -192,6 +191,5 @@ enum rq_flag_bits {
192#define REQ_IO_STAT (1 << __REQ_IO_STAT) 191#define REQ_IO_STAT (1 << __REQ_IO_STAT)
193#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE) 192#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE)
194#define REQ_SECURE (1 << __REQ_SECURE) 193#define REQ_SECURE (1 << __REQ_SECURE)
195#define REQ_ON_PLUG (1 << __REQ_ON_PLUG)
196 194
197#endif /* __LINUX_BLK_TYPES_H */ 195#endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2ad95fa1d130..ae9091a68480 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -257,7 +257,7 @@ struct queue_limits {
257 unsigned char misaligned; 257 unsigned char misaligned;
258 unsigned char discard_misaligned; 258 unsigned char discard_misaligned;
259 unsigned char cluster; 259 unsigned char cluster;
260 signed char discard_zeroes_data; 260 unsigned char discard_zeroes_data;
261}; 261};
262 262
263struct request_queue 263struct request_queue
@@ -364,6 +364,8 @@ struct request_queue
364 * for flush operations 364 * for flush operations
365 */ 365 */
366 unsigned int flush_flags; 366 unsigned int flush_flags;
367 unsigned int flush_not_queueable:1;
368 unsigned int flush_queue_delayed:1;
367 unsigned int flush_pending_idx:1; 369 unsigned int flush_pending_idx:1;
368 unsigned int flush_running_idx:1; 370 unsigned int flush_running_idx:1;
369 unsigned long flush_pending_since; 371 unsigned long flush_pending_since;
@@ -843,6 +845,7 @@ extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
843extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); 845extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
844extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); 846extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
845extern void blk_queue_flush(struct request_queue *q, unsigned int flush); 847extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
848extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
846extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); 849extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
847 850
848extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); 851extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
@@ -1066,13 +1069,16 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
1066{ 1069{
1067 unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1); 1070 unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
1068 1071
1072 if (!lim->max_discard_sectors)
1073 return 0;
1074
1069 return (lim->discard_granularity + lim->discard_alignment - alignment) 1075 return (lim->discard_granularity + lim->discard_alignment - alignment)
1070 & (lim->discard_granularity - 1); 1076 & (lim->discard_granularity - 1);
1071} 1077}
1072 1078
1073static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) 1079static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
1074{ 1080{
1075 if (q->limits.discard_zeroes_data == 1) 1081 if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
1076 return 1; 1082 return 1;
1077 1083
1078 return 0; 1084 return 0;
@@ -1111,6 +1117,11 @@ static inline unsigned int block_size(struct block_device *bdev)
1111 return bdev->bd_block_size; 1117 return bdev->bd_block_size;
1112} 1118}
1113 1119
1120static inline bool queue_flush_queueable(struct request_queue *q)
1121{
1122 return !q->flush_not_queueable;
1123}
1124
1114typedef struct {struct page *v;} Sector; 1125typedef struct {struct page *v;} Sector;
1115 1126
1116unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); 1127unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 01eca1794e14..ab344a521105 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -99,24 +99,31 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
99 unsigned long align, 99 unsigned long align,
100 unsigned long goal); 100 unsigned long goal);
101 101
102#ifdef CONFIG_NO_BOOTMEM
103/* We are using top down, so it is safe to use 0 here */
104#define BOOTMEM_LOW_LIMIT 0
105#else
106#define BOOTMEM_LOW_LIMIT __pa(MAX_DMA_ADDRESS)
107#endif
108
102#define alloc_bootmem(x) \ 109#define alloc_bootmem(x) \
103 __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 110 __alloc_bootmem(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
104#define alloc_bootmem_align(x, align) \ 111#define alloc_bootmem_align(x, align) \
105 __alloc_bootmem(x, align, __pa(MAX_DMA_ADDRESS)) 112 __alloc_bootmem(x, align, BOOTMEM_LOW_LIMIT)
106#define alloc_bootmem_nopanic(x) \ 113#define alloc_bootmem_nopanic(x) \
107 __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 114 __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
108#define alloc_bootmem_pages(x) \ 115#define alloc_bootmem_pages(x) \
109 __alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) 116 __alloc_bootmem(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
110#define alloc_bootmem_pages_nopanic(x) \ 117#define alloc_bootmem_pages_nopanic(x) \
111 __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) 118 __alloc_bootmem_nopanic(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
112#define alloc_bootmem_node(pgdat, x) \ 119#define alloc_bootmem_node(pgdat, x) \
113 __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 120 __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
114#define alloc_bootmem_node_nopanic(pgdat, x) \ 121#define alloc_bootmem_node_nopanic(pgdat, x) \
115 __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 122 __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
116#define alloc_bootmem_pages_node(pgdat, x) \ 123#define alloc_bootmem_pages_node(pgdat, x) \
117 __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) 124 __alloc_bootmem_node(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
118#define alloc_bootmem_pages_node_nopanic(pgdat, x) \ 125#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
119 __alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) 126 __alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
120 127
121#define alloc_bootmem_low(x) \ 128#define alloc_bootmem_low(x) \
122 __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0) 129 __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
diff --git a/include/linux/c2port.h b/include/linux/c2port.h
index 2a5cd867c365..a2f7d7413f30 100644
--- a/include/linux/c2port.h
+++ b/include/linux/c2port.h
@@ -60,9 +60,6 @@ struct c2port_ops {
60 * Exported functions 60 * Exported functions
61 */ 61 */
62 62
63#define to_class_dev(obj) container_of((obj), struct class_device, kobj)
64#define to_c2port_device(obj) container_of((obj), struct c2port_device, class)
65
66extern struct c2port_device *c2port_device_register(char *name, 63extern struct c2port_device *c2port_device_register(char *name,
67 struct c2port_ops *ops, void *devdata); 64 struct c2port_ops *ops, void *devdata);
68extern void c2port_device_unregister(struct c2port_device *dev); 65extern void c2port_device_unregister(struct c2port_device *dev);
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index b8e995fbd867..b8c60694b2b0 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -313,6 +313,7 @@ enum {
313 CEPH_MDS_OP_GETATTR = 0x00101, 313 CEPH_MDS_OP_GETATTR = 0x00101,
314 CEPH_MDS_OP_LOOKUPHASH = 0x00102, 314 CEPH_MDS_OP_LOOKUPHASH = 0x00102,
315 CEPH_MDS_OP_LOOKUPPARENT = 0x00103, 315 CEPH_MDS_OP_LOOKUPPARENT = 0x00103,
316 CEPH_MDS_OP_LOOKUPINO = 0x00104,
316 317
317 CEPH_MDS_OP_SETXATTR = 0x01105, 318 CEPH_MDS_OP_SETXATTR = 0x01105,
318 CEPH_MDS_OP_RMXATTR = 0x01106, 319 CEPH_MDS_OP_RMXATTR = 0x01106,
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 5778b559d59c..ddcb7db38e67 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -12,6 +12,8 @@
12#include <linux/sem.h> 12#include <linux/sem.h>
13#include <linux/socket.h> 13#include <linux/socket.h>
14#include <linux/if.h> 14#include <linux/if.h>
15#include <linux/fs.h>
16#include <linux/aio_abi.h> /* for aio_context_t */
15 17
16#include <asm/compat.h> 18#include <asm/compat.h>
17#include <asm/siginfo.h> 19#include <asm/siginfo.h>
@@ -26,7 +28,7 @@ typedef __compat_gid32_t compat_gid_t;
26struct compat_sel_arg_struct; 28struct compat_sel_arg_struct;
27struct rusage; 29struct rusage;
28 30
29struct compat_itimerspec { 31struct compat_itimerspec {
30 struct compat_timespec it_interval; 32 struct compat_timespec it_interval;
31 struct compat_timespec it_value; 33 struct compat_timespec it_value;
32}; 34};
@@ -70,9 +72,9 @@ struct compat_timex {
70 compat_long_t stbcnt; 72 compat_long_t stbcnt;
71 compat_int_t tai; 73 compat_int_t tai;
72 74
73 compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32; 75 compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32;
74 compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32; 76 compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32;
75 compat_int_t :32; compat_int_t :32; compat_int_t :32; 77 compat_int_t:32; compat_int_t:32; compat_int_t:32;
76}; 78};
77 79
78#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW) 80#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW)
@@ -81,8 +83,10 @@ typedef struct {
81 compat_sigset_word sig[_COMPAT_NSIG_WORDS]; 83 compat_sigset_word sig[_COMPAT_NSIG_WORDS];
82} compat_sigset_t; 84} compat_sigset_t;
83 85
84extern int get_compat_timespec(struct timespec *, const struct compat_timespec __user *); 86extern int get_compat_timespec(struct timespec *,
85extern int put_compat_timespec(const struct timespec *, struct compat_timespec __user *); 87 const struct compat_timespec __user *);
88extern int put_compat_timespec(const struct timespec *,
89 struct compat_timespec __user *);
86 90
87struct compat_iovec { 91struct compat_iovec {
88 compat_uptr_t iov_base; 92 compat_uptr_t iov_base;
@@ -113,7 +117,8 @@ struct compat_rusage {
113 compat_long_t ru_nivcsw; 117 compat_long_t ru_nivcsw;
114}; 118};
115 119
116extern int put_compat_rusage(const struct rusage *, struct compat_rusage __user *); 120extern int put_compat_rusage(const struct rusage *,
121 struct compat_rusage __user *);
117 122
118struct compat_siginfo; 123struct compat_siginfo;
119 124
@@ -166,8 +171,7 @@ struct compat_ifmap {
166 unsigned char port; 171 unsigned char port;
167}; 172};
168 173
169struct compat_if_settings 174struct compat_if_settings {
170{
171 unsigned int type; /* Type of physical device or protocol */ 175 unsigned int type; /* Type of physical device or protocol */
172 unsigned int size; /* Size of the data allocated by the caller */ 176 unsigned int size; /* Size of the data allocated by the caller */
173 compat_uptr_t ifs_ifsu; /* union of pointers */ 177 compat_uptr_t ifs_ifsu; /* union of pointers */
@@ -195,8 +199,8 @@ struct compat_ifreq {
195}; 199};
196 200
197struct compat_ifconf { 201struct compat_ifconf {
198 compat_int_t ifc_len; /* size of buffer */ 202 compat_int_t ifc_len; /* size of buffer */
199 compat_caddr_t ifcbuf; 203 compat_caddr_t ifcbuf;
200}; 204};
201 205
202struct compat_robust_list { 206struct compat_robust_list {
@@ -209,6 +213,18 @@ struct compat_robust_list_head {
209 compat_uptr_t list_op_pending; 213 compat_uptr_t list_op_pending;
210}; 214};
211 215
216struct compat_statfs;
217struct compat_statfs64;
218struct compat_old_linux_dirent;
219struct compat_linux_dirent;
220struct linux_dirent64;
221struct compat_msghdr;
222struct compat_mmsghdr;
223struct compat_sysinfo;
224struct compat_sysctl_args;
225struct compat_kexec_segment;
226struct compat_mq_attr;
227
212extern void compat_exit_robust_list(struct task_struct *curr); 228extern void compat_exit_robust_list(struct task_struct *curr);
213 229
214asmlinkage long 230asmlinkage long
@@ -243,8 +259,8 @@ asmlinkage ssize_t compat_sys_pwritev(unsigned long fd,
243 const struct compat_iovec __user *vec, 259 const struct compat_iovec __user *vec,
244 unsigned long vlen, u32 pos_low, u32 pos_high); 260 unsigned long vlen, u32 pos_low, u32 pos_high);
245 261
246int compat_do_execve(char * filename, compat_uptr_t __user *argv, 262int compat_do_execve(char *filename, compat_uptr_t __user *argv,
247 compat_uptr_t __user *envp, struct pt_regs * regs); 263 compat_uptr_t __user *envp, struct pt_regs *regs);
248 264
249asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, 265asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
250 compat_ulong_t __user *outp, compat_ulong_t __user *exp, 266 compat_ulong_t __user *outp, compat_ulong_t __user *exp,
@@ -331,12 +347,18 @@ asmlinkage long compat_sys_epoll_pwait(int epfd,
331 const compat_sigset_t __user *sigmask, 347 const compat_sigset_t __user *sigmask,
332 compat_size_t sigsetsize); 348 compat_size_t sigsetsize);
333 349
334asmlinkage long compat_sys_utimensat(unsigned int dfd, const char __user *filename, 350asmlinkage long compat_sys_utime(const char __user *filename,
335 struct compat_timespec __user *t, int flags); 351 struct compat_utimbuf __user *t);
352asmlinkage long compat_sys_utimensat(unsigned int dfd,
353 const char __user *filename,
354 struct compat_timespec __user *t,
355 int flags);
336 356
357asmlinkage long compat_sys_time(compat_time_t __user *tloc);
358asmlinkage long compat_sys_stime(compat_time_t __user *tptr);
337asmlinkage long compat_sys_signalfd(int ufd, 359asmlinkage long compat_sys_signalfd(int ufd,
338 const compat_sigset_t __user *sigmask, 360 const compat_sigset_t __user *sigmask,
339 compat_size_t sigsetsize); 361 compat_size_t sigsetsize);
340asmlinkage long compat_sys_timerfd_settime(int ufd, int flags, 362asmlinkage long compat_sys_timerfd_settime(int ufd, int flags,
341 const struct compat_itimerspec __user *utmr, 363 const struct compat_itimerspec __user *utmr,
342 struct compat_itimerspec __user *otmr); 364 struct compat_itimerspec __user *otmr);
@@ -348,16 +370,190 @@ asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_page,
348 const int __user *nodes, 370 const int __user *nodes,
349 int __user *status, 371 int __user *status,
350 int flags); 372 int flags);
351asmlinkage long compat_sys_futimesat(unsigned int dfd, const char __user *filename, 373asmlinkage long compat_sys_futimesat(unsigned int dfd,
374 const char __user *filename,
352 struct compat_timeval __user *t); 375 struct compat_timeval __user *t);
353asmlinkage long compat_sys_newfstatat(unsigned int dfd, const char __user * filename, 376asmlinkage long compat_sys_utimes(const char __user *filename,
377 struct compat_timeval __user *t);
378asmlinkage long compat_sys_newstat(const char __user *filename,
379 struct compat_stat __user *statbuf);
380asmlinkage long compat_sys_newlstat(const char __user *filename,
381 struct compat_stat __user *statbuf);
382asmlinkage long compat_sys_newfstatat(unsigned int dfd,
383 const char __user *filename,
354 struct compat_stat __user *statbuf, 384 struct compat_stat __user *statbuf,
355 int flag); 385 int flag);
386asmlinkage long compat_sys_newfstat(unsigned int fd,
387 struct compat_stat __user *statbuf);
388asmlinkage long compat_sys_statfs(const char __user *pathname,
389 struct compat_statfs __user *buf);
390asmlinkage long compat_sys_fstatfs(unsigned int fd,
391 struct compat_statfs __user *buf);
392asmlinkage long compat_sys_statfs64(const char __user *pathname,
393 compat_size_t sz,
394 struct compat_statfs64 __user *buf);
395asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
396 struct compat_statfs64 __user *buf);
397asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
398 unsigned long arg);
399asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd,
400 unsigned long arg);
401asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p);
402asmlinkage long compat_sys_io_getevents(aio_context_t ctx_id,
403 unsigned long min_nr,
404 unsigned long nr,
405 struct io_event __user *events,
406 struct compat_timespec __user *timeout);
407asmlinkage long compat_sys_io_submit(aio_context_t ctx_id, int nr,
408 u32 __user *iocb);
409asmlinkage long compat_sys_mount(const char __user *dev_name,
410 const char __user *dir_name,
411 const char __user *type, unsigned long flags,
412 const void __user *data);
413asmlinkage long compat_sys_old_readdir(unsigned int fd,
414 struct compat_old_linux_dirent __user *,
415 unsigned int count);
416asmlinkage long compat_sys_getdents(unsigned int fd,
417 struct compat_linux_dirent __user *dirent,
418 unsigned int count);
419asmlinkage long compat_sys_getdents64(unsigned int fd,
420 struct linux_dirent64 __user *dirent,
421 unsigned int count);
422asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *,
423 unsigned int nr_segs, unsigned int flags);
424asmlinkage long compat_sys_open(const char __user *filename, int flags,
425 int mode);
356asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename, 426asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,
357 int flags, int mode); 427 int flags, int mode);
428asmlinkage long compat_sys_open_by_handle_at(int mountdirfd,
429 struct file_handle __user *handle,
430 int flags);
431asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
432 compat_ulong_t __user *outp,
433 compat_ulong_t __user *exp,
434 struct compat_timespec __user *tsp,
435 void __user *sig);
436asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
437 unsigned int nfds,
438 struct compat_timespec __user *tsp,
439 const compat_sigset_t __user *sigmask,
440 compat_size_t sigsetsize);
441#if (defined(CONFIG_NFSD) || defined(CONFIG_NFSD_MODULE)) && \
442 !defined(CONFIG_NFSD_DEPRECATED)
443union compat_nfsctl_res;
444struct compat_nfsctl_arg;
445asmlinkage long compat_sys_nfsservctl(int cmd,
446 struct compat_nfsctl_arg __user *arg,
447 union compat_nfsctl_res __user *res);
448#else
449asmlinkage long compat_sys_nfsservctl(int cmd, void *notused, void *notused2);
450#endif
451asmlinkage long compat_sys_signalfd4(int ufd,
452 const compat_sigset_t __user *sigmask,
453 compat_size_t sigsetsize, int flags);
454asmlinkage long compat_sys_get_mempolicy(int __user *policy,
455 compat_ulong_t __user *nmask,
456 compat_ulong_t maxnode,
457 compat_ulong_t addr,
458 compat_ulong_t flags);
459asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
460 compat_ulong_t maxnode);
461asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
462 compat_ulong_t mode,
463 compat_ulong_t __user *nmask,
464 compat_ulong_t maxnode, compat_ulong_t flags);
465
466asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
467 char __user *optval, unsigned int optlen);
468asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg,
469 unsigned flags);
470asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
471 unsigned int flags);
472asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len,
473 unsigned flags);
474asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len,
475 unsigned flags, struct sockaddr __user *addr,
476 int __user *addrlen);
477asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
478 unsigned vlen, unsigned int flags,
479 struct compat_timespec __user *timeout);
480asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
481 struct compat_timespec __user *rmtp);
482asmlinkage long compat_sys_getitimer(int which,
483 struct compat_itimerval __user *it);
484asmlinkage long compat_sys_setitimer(int which,
485 struct compat_itimerval __user *in,
486 struct compat_itimerval __user *out);
487asmlinkage long compat_sys_times(struct compat_tms __user *tbuf);
488asmlinkage long compat_sys_setrlimit(unsigned int resource,
489 struct compat_rlimit __user *rlim);
490asmlinkage long compat_sys_getrlimit(unsigned int resource,
491 struct compat_rlimit __user *rlim);
492asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru);
493asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
494 unsigned int len,
495 compat_ulong_t __user *user_mask_ptr);
496asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid,
497 unsigned int len,
498 compat_ulong_t __user *user_mask_ptr);
499asmlinkage long compat_sys_timer_create(clockid_t which_clock,
500 struct compat_sigevent __user *timer_event_spec,
501 timer_t __user *created_timer_id);
502asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags,
503 struct compat_itimerspec __user *new,
504 struct compat_itimerspec __user *old);
505asmlinkage long compat_sys_timer_gettime(timer_t timer_id,
506 struct compat_itimerspec __user *setting);
507asmlinkage long compat_sys_clock_settime(clockid_t which_clock,
508 struct compat_timespec __user *tp);
509asmlinkage long compat_sys_clock_gettime(clockid_t which_clock,
510 struct compat_timespec __user *tp);
511asmlinkage long compat_sys_clock_adjtime(clockid_t which_clock,
512 struct compat_timex __user *tp);
513asmlinkage long compat_sys_clock_getres(clockid_t which_clock,
514 struct compat_timespec __user *tp);
515asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
516 struct compat_timespec __user *rqtp,
517 struct compat_timespec __user *rmtp);
518asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese,
519 struct compat_siginfo __user *uinfo,
520 struct compat_timespec __user *uts, compat_size_t sigsetsize);
521asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset,
522 compat_size_t sigsetsize);
523asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info);
524asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
525 unsigned long arg);
526asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
527 struct compat_timespec __user *utime, u32 __user *uaddr2,
528 u32 val3);
529asmlinkage long compat_sys_getsockopt(int fd, int level, int optname,
530 char __user *optval, int __user *optlen);
531asmlinkage long compat_sys_kexec_load(unsigned long entry,
532 unsigned long nr_segments,
533 struct compat_kexec_segment __user *,
534 unsigned long flags);
535asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
536 const struct compat_mq_attr __user *u_mqstat,
537 struct compat_mq_attr __user *u_omqstat);
538asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
539 const struct compat_sigevent __user *u_notification);
540asmlinkage long compat_sys_mq_open(const char __user *u_name,
541 int oflag, compat_mode_t mode,
542 struct compat_mq_attr __user *u_attr);
543asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes,
544 const char __user *u_msg_ptr,
545 size_t msg_len, unsigned int msg_prio,
546 const struct compat_timespec __user *u_abs_timeout);
547asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes,
548 char __user *u_msg_ptr,
549 size_t msg_len, unsigned int __user *u_msg_prio,
550 const struct compat_timespec __user *u_abs_timeout);
551asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
552asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args);
358 553
359extern ssize_t compat_rw_copy_check_uvector(int type, 554extern ssize_t compat_rw_copy_check_uvector(int type,
360 const struct compat_iovec __user *uvector, unsigned long nr_segs, 555 const struct compat_iovec __user *uvector,
556 unsigned long nr_segs,
361 unsigned long fast_segs, struct iovec *fast_pointer, 557 unsigned long fast_segs, struct iovec *fast_pointer,
362 struct iovec **ret_pointer); 558 struct iovec **ret_pointer);
363 559
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index cb4c1eb7778e..59e4028e833d 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -34,8 +34,12 @@
34 __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \ 34 __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
35 (typeof(ptr)) (__ptr + (off)); }) 35 (typeof(ptr)) (__ptr + (off)); })
36 36
37#ifdef __CHECKER__
38#define __must_be_array(arr) 0
39#else
37/* &a[0] degrades to a pointer: a different type from an array */ 40/* &a[0] degrades to a pointer: a different type from an array */
38#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) 41#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
42#endif
39 43
40/* 44/*
41 * Force always-inline if the user requests it so via the .config, 45 * Force always-inline if the user requests it so via the .config,
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 64b7c003fd7a..dfadc96e9d63 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -51,7 +51,7 @@
51#if __GNUC_MINOR__ > 0 51#if __GNUC_MINOR__ > 0
52#define __compiletime_object_size(obj) __builtin_object_size(obj, 0) 52#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
53#endif 53#endif
54#if __GNUC_MINOR__ >= 4 54#if __GNUC_MINOR__ >= 4 && !defined(__CHECKER__)
55#define __compiletime_warning(message) __attribute__((warning(message))) 55#define __compiletime_warning(message) __attribute__((warning(message)))
56#define __compiletime_error(message) __attribute__((error(message))) 56#define __compiletime_error(message) __attribute__((error(message)))
57#endif 57#endif
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index bae6fe24d1f9..b24ac56477b4 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -547,6 +547,21 @@ static inline int cpumask_parse_user(const char __user *buf, int len,
547} 547}
548 548
549/** 549/**
550 * cpumask_parselist_user - extract a cpumask from a user string
551 * @buf: the buffer to extract from
552 * @len: the length of the buffer
553 * @dstp: the cpumask to set.
554 *
555 * Returns -errno, or 0 for success.
556 */
557static inline int cpumask_parselist_user(const char __user *buf, int len,
558 struct cpumask *dstp)
559{
560 return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
561 nr_cpumask_bits);
562}
563
564/**
550 * cpulist_scnprintf - print a cpumask into a string as comma-separated list 565 * cpulist_scnprintf - print a cpumask into a string as comma-separated list
551 * @buf: the buffer to sprintf into 566 * @buf: the buffer to sprintf into
552 * @len: the length of the buffer 567 * @len: the length of the buffer
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index cec467f5d676..9e5f5607eba3 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -38,7 +38,7 @@
38 38
39/* Although the Linux source code makes a difference between 39/* Although the Linux source code makes a difference between
40 generic endianness and the bitfields' endianness, there is no 40 generic endianness and the bitfields' endianness, there is no
41 architecture as of Linux-2.6.24-rc4 where the bitfileds' endianness 41 architecture as of Linux-2.6.24-rc4 where the bitfields' endianness
42 does not match the generic endianness. */ 42 does not match the generic endianness. */
43 43
44#if __BYTE_ORDER == __LITTLE_ENDIAN 44#if __BYTE_ORDER == __LITTLE_ENDIAN
@@ -53,7 +53,7 @@
53 53
54 54
55extern const char *drbd_buildtag(void); 55extern const char *drbd_buildtag(void);
56#define REL_VERSION "8.3.10" 56#define REL_VERSION "8.3.11"
57#define API_VERSION 88 57#define API_VERSION 88
58#define PRO_VERSION_MIN 86 58#define PRO_VERSION_MIN 86
59#define PRO_VERSION_MAX 96 59#define PRO_VERSION_MAX 96
@@ -195,7 +195,7 @@ enum drbd_conns {
195 C_WF_REPORT_PARAMS, /* we have a socket */ 195 C_WF_REPORT_PARAMS, /* we have a socket */
196 C_CONNECTED, /* we have introduced each other */ 196 C_CONNECTED, /* we have introduced each other */
197 C_STARTING_SYNC_S, /* starting full sync by admin request. */ 197 C_STARTING_SYNC_S, /* starting full sync by admin request. */
198 C_STARTING_SYNC_T, /* stariing full sync by admin request. */ 198 C_STARTING_SYNC_T, /* starting full sync by admin request. */
199 C_WF_BITMAP_S, 199 C_WF_BITMAP_S,
200 C_WF_BITMAP_T, 200 C_WF_BITMAP_T,
201 C_WF_SYNC_UUID, 201 C_WF_SYNC_UUID,
@@ -236,7 +236,7 @@ union drbd_state {
236 * pointed out by Maxim Uvarov q<muvarov@ru.mvista.com> 236 * pointed out by Maxim Uvarov q<muvarov@ru.mvista.com>
237 * even though we transmit as "cpu_to_be32(state)", 237 * even though we transmit as "cpu_to_be32(state)",
238 * the offsets of the bitfields still need to be swapped 238 * the offsets of the bitfields still need to be swapped
239 * on different endianess. 239 * on different endianness.
240 */ 240 */
241 struct { 241 struct {
242#if defined(__LITTLE_ENDIAN_BITFIELD) 242#if defined(__LITTLE_ENDIAN_BITFIELD)
@@ -266,7 +266,7 @@ union drbd_state {
266 unsigned peer:2 ; /* 3/4 primary/secondary/unknown */ 266 unsigned peer:2 ; /* 3/4 primary/secondary/unknown */
267 unsigned role:2 ; /* 3/4 primary/secondary/unknown */ 267 unsigned role:2 ; /* 3/4 primary/secondary/unknown */
268#else 268#else
269# error "this endianess is not supported" 269# error "this endianness is not supported"
270#endif 270#endif
271 }; 271 };
272 unsigned int i; 272 unsigned int i;
diff --git a/include/linux/drbd_tag_magic.h b/include/linux/drbd_tag_magic.h
index f14a165e82dc..069543190516 100644
--- a/include/linux/drbd_tag_magic.h
+++ b/include/linux/drbd_tag_magic.h
@@ -30,7 +30,7 @@ enum packet_types {
30 int tag_and_len ## member; 30 int tag_and_len ## member;
31#include "linux/drbd_nl.h" 31#include "linux/drbd_nl.h"
32 32
33/* declate tag-list-sizes */ 33/* declare tag-list-sizes */
34static const int tag_list_sizes[] = { 34static const int tag_list_sizes[] = {
35#define NL_PACKET(name, number, fields) 2 fields , 35#define NL_PACKET(name, number, fields) 2 fields ,
36#define NL_INTEGER(pn, pr, member) + 4 + 4 36#define NL_INTEGER(pn, pr, member) + 4 + 4
diff --git a/include/linux/fs.h b/include/linux/fs.h
index cdf9495df204..3f9d3251790d 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -23,7 +23,8 @@
23 23
24/* Fixed constants first: */ 24/* Fixed constants first: */
25#undef NR_OPEN 25#undef NR_OPEN
26#define INR_OPEN 1024 /* Initial setting for nfile rlimits */ 26#define INR_OPEN_CUR 1024 /* Initial setting for nfile rlimits */
27#define INR_OPEN_MAX 4096 /* Hard limit for nfile rlimits */
27 28
28#define BLOCK_SIZE_BITS 10 29#define BLOCK_SIZE_BITS 10
29#define BLOCK_SIZE (1<<BLOCK_SIZE_BITS) 30#define BLOCK_SIZE (1<<BLOCK_SIZE_BITS)
@@ -634,8 +635,7 @@ struct address_space {
634 unsigned int i_mmap_writable;/* count VM_SHARED mappings */ 635 unsigned int i_mmap_writable;/* count VM_SHARED mappings */
635 struct prio_tree_root i_mmap; /* tree of private and shared mappings */ 636 struct prio_tree_root i_mmap; /* tree of private and shared mappings */
636 struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ 637 struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
637 spinlock_t i_mmap_lock; /* protect tree, count, list */ 638 struct mutex i_mmap_mutex; /* protect tree, count, list */
638 unsigned int truncate_count; /* Cover race condition with truncate */
639 unsigned long nrpages; /* number of total pages */ 639 unsigned long nrpages; /* number of total pages */
640 pgoff_t writeback_index;/* writeback starts here */ 640 pgoff_t writeback_index;/* writeback starts here */
641 const struct address_space_operations *a_ops; /* methods */ 641 const struct address_space_operations *a_ops; /* methods */
@@ -644,7 +644,6 @@ struct address_space {
644 spinlock_t private_lock; /* for use by the address_space */ 644 spinlock_t private_lock; /* for use by the address_space */
645 struct list_head private_list; /* ditto */ 645 struct list_head private_list; /* ditto */
646 struct address_space *assoc_mapping; /* ditto */ 646 struct address_space *assoc_mapping; /* ditto */
647 struct mutex unmap_mutex; /* to protect unmapping */
648} __attribute__((aligned(sizeof(long)))); 647} __attribute__((aligned(sizeof(long))));
649 /* 648 /*
650 * On most architectures that alignment is already the case; but 649 * On most architectures that alignment is already the case; but
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 76427e688d15..af095b54502e 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -100,17 +100,6 @@ struct fscache_operation {
100 100
101 /* operation releaser */ 101 /* operation releaser */
102 fscache_operation_release_t release; 102 fscache_operation_release_t release;
103
104#ifdef CONFIG_WORKQUEUE_DEBUGFS
105 struct work_struct put_work; /* work to delay operation put */
106 const char *name; /* operation name */
107 const char *state; /* operation state */
108#define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0)
109#define fscache_set_op_state(OP, S) do { (OP)->state = (S); } while(0)
110#else
111#define fscache_set_op_name(OP, N) do { } while(0)
112#define fscache_set_op_state(OP, S) do { } while(0)
113#endif
114}; 103};
115 104
116extern atomic_t fscache_op_debug_id; 105extern atomic_t fscache_op_debug_id;
@@ -137,7 +126,6 @@ static inline void fscache_operation_init(struct fscache_operation *op,
137 op->processor = processor; 126 op->processor = processor;
138 op->release = release; 127 op->release = release;
139 INIT_LIST_HEAD(&op->pend_link); 128 INIT_LIST_HEAD(&op->pend_link);
140 fscache_set_op_state(op, "Init");
141} 129}
142 130
143/* 131/*
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 9869ef3674ac..5bbebda78b02 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -9,6 +9,8 @@
9 */ 9 */
10 10
11 11
12#ifndef __GENALLOC_H__
13#define __GENALLOC_H__
12/* 14/*
13 * General purpose special memory pool descriptor. 15 * General purpose special memory pool descriptor.
14 */ 16 */
@@ -24,13 +26,34 @@ struct gen_pool {
24struct gen_pool_chunk { 26struct gen_pool_chunk {
25 spinlock_t lock; 27 spinlock_t lock;
26 struct list_head next_chunk; /* next chunk in pool */ 28 struct list_head next_chunk; /* next chunk in pool */
29 phys_addr_t phys_addr; /* physical starting address of memory chunk */
27 unsigned long start_addr; /* starting address of memory chunk */ 30 unsigned long start_addr; /* starting address of memory chunk */
28 unsigned long end_addr; /* ending address of memory chunk */ 31 unsigned long end_addr; /* ending address of memory chunk */
29 unsigned long bits[0]; /* bitmap for allocating memory chunk */ 32 unsigned long bits[0]; /* bitmap for allocating memory chunk */
30}; 33};
31 34
32extern struct gen_pool *gen_pool_create(int, int); 35extern struct gen_pool *gen_pool_create(int, int);
33extern int gen_pool_add(struct gen_pool *, unsigned long, size_t, int); 36extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
37extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t,
38 size_t, int);
39/**
40 * gen_pool_add - add a new chunk of special memory to the pool
41 * @pool: pool to add new memory chunk to
42 * @addr: starting address of memory chunk to add to pool
43 * @size: size in bytes of the memory chunk to add to pool
44 * @nid: node id of the node the chunk structure and bitmap should be
45 * allocated on, or -1
46 *
47 * Add a new chunk of special memory to the specified pool.
48 *
49 * Returns 0 on success or a -ve errno on failure.
50 */
51static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
52 size_t size, int nid)
53{
54 return gen_pool_add_virt(pool, addr, -1, size, nid);
55}
34extern void gen_pool_destroy(struct gen_pool *); 56extern void gen_pool_destroy(struct gen_pool *);
35extern unsigned long gen_pool_alloc(struct gen_pool *, size_t); 57extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
36extern void gen_pool_free(struct gen_pool *, unsigned long, size_t); 58extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
59#endif /* __GENALLOC_H__ */
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index d764a426e9fd..b78956b3c2e7 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -100,7 +100,6 @@ struct hd_struct {
100 sector_t start_sect; 100 sector_t start_sect;
101 sector_t nr_sects; 101 sector_t nr_sects;
102 sector_t alignment_offset; 102 sector_t alignment_offset;
103 unsigned int discard_alignment;
104 struct device __dev; 103 struct device __dev;
105 struct kobject *holder_dir; 104 struct kobject *holder_dir;
106 int policy, partno; 105 int policy, partno;
@@ -127,6 +126,7 @@ struct hd_struct {
127#define GENHD_FL_SUPPRESS_PARTITION_INFO 32 126#define GENHD_FL_SUPPRESS_PARTITION_INFO 32
128#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */ 127#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */
129#define GENHD_FL_NATIVE_CAPACITY 128 128#define GENHD_FL_NATIVE_CAPACITY 128
129#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 256
130 130
131enum { 131enum {
132 DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */ 132 DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 56d8fc87fbbc..cb4089254f01 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -249,14 +249,7 @@ static inline enum zone_type gfp_zone(gfp_t flags)
249 249
250 z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) & 250 z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
251 ((1 << ZONES_SHIFT) - 1); 251 ((1 << ZONES_SHIFT) - 1);
252 252 VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
253 if (__builtin_constant_p(bit))
254 BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
255 else {
256#ifdef CONFIG_DEBUG_VM
257 BUG_ON((GFP_ZONE_BAD >> bit) & 1);
258#endif
259 }
260 return z; 253 return z;
261} 254}
262 255
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 8847c8c29791..48c32ebf65a7 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -92,12 +92,8 @@ extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
92#define wait_split_huge_page(__anon_vma, __pmd) \ 92#define wait_split_huge_page(__anon_vma, __pmd) \
93 do { \ 93 do { \
94 pmd_t *____pmd = (__pmd); \ 94 pmd_t *____pmd = (__pmd); \
95 spin_unlock_wait(&(__anon_vma)->root->lock); \ 95 anon_vma_lock(__anon_vma); \
96 /* \ 96 anon_vma_unlock(__anon_vma); \
97 * spin_unlock_wait() is just a loop in C and so the \
98 * CPU can reorder anything around it. \
99 */ \
100 smp_mb(); \
101 BUG_ON(pmd_trans_splitting(*____pmd) || \ 97 BUG_ON(pmd_trans_splitting(*____pmd) || \
102 pmd_trans_huge(*____pmd)); \ 98 pmd_trans_huge(*____pmd)); \
103 } while (0) 99 } while (0)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index f37ba716ef8b..fb0e7329fee1 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -248,6 +248,37 @@ int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
248int __must_check kstrtou8(const char *s, unsigned int base, u8 *res); 248int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
249int __must_check kstrtos8(const char *s, unsigned int base, s8 *res); 249int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
250 250
251int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
252int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
253int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
254int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
255int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
256int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
257int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
258int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
259int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
260int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
261
262static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
263{
264 return kstrtoull_from_user(s, count, base, res);
265}
266
267static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
268{
269 return kstrtoll_from_user(s, count, base, res);
270}
271
272static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
273{
274 return kstrtouint_from_user(s, count, base, res);
275}
276
277static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
278{
279 return kstrtoint_from_user(s, count, base, res);
280}
281
251extern unsigned long simple_strtoul(const char *,char **,unsigned int); 282extern unsigned long simple_strtoul(const char *,char **,unsigned int);
252extern long simple_strtol(const char *,char **,unsigned int); 283extern long simple_strtol(const char *,char **,unsigned int);
253extern unsigned long long simple_strtoull(const char *,char **,unsigned int); 284extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
@@ -638,6 +669,13 @@ struct sysinfo {
638 char _f[20-2*sizeof(long)-sizeof(int)]; /* Padding: libc5 uses this.. */ 669 char _f[20-2*sizeof(long)-sizeof(int)]; /* Padding: libc5 uses this.. */
639}; 670};
640 671
672#ifdef __CHECKER__
673#define BUILD_BUG_ON_NOT_POWER_OF_2(n)
674#define BUILD_BUG_ON_ZERO(e)
675#define BUILD_BUG_ON_NULL(e)
676#define BUILD_BUG_ON(condition)
677#else /* __CHECKER__ */
678
641/* Force a compilation error if a constant expression is not a power of 2 */ 679/* Force a compilation error if a constant expression is not a power of 2 */
642#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ 680#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
643 BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) 681 BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
@@ -674,6 +712,7 @@ extern int __build_bug_on_failed;
674 if (condition) __build_bug_on_failed = 1; \ 712 if (condition) __build_bug_on_failed = 1; \
675 } while(0) 713 } while(0)
676#endif 714#endif
715#endif /* __CHECKER__ */
677 716
678/* Trap pasters of __FUNCTION__ at compile-time */ 717/* Trap pasters of __FUNCTION__ at compile-time */
679#define __FUNCTION__ (__func__) 718#define __FUNCTION__ (__func__)
diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h
index f158eb1149aa..b8d6fffed4d8 100644
--- a/include/linux/leds-pca9532.h
+++ b/include/linux/leds-pca9532.h
@@ -25,7 +25,7 @@ enum pca9532_state {
25}; 25};
26 26
27enum pca9532_type { PCA9532_TYPE_NONE, PCA9532_TYPE_LED, 27enum pca9532_type { PCA9532_TYPE_NONE, PCA9532_TYPE_LED,
28 PCA9532_TYPE_N2100_BEEP }; 28 PCA9532_TYPE_N2100_BEEP, PCA9532_TYPE_GPIO };
29 29
30struct pca9532_led { 30struct pca9532_led {
31 u8 id; 31 u8 id;
@@ -41,6 +41,7 @@ struct pca9532_platform_data {
41 struct pca9532_led leds[16]; 41 struct pca9532_led leds[16];
42 u8 pwm[2]; 42 u8 pwm[2];
43 u8 psc[2]; 43 u8 psc[2];
44 int gpio_base;
44}; 45};
45 46
46#endif /* __LINUX_PCA9532_H */ 47#endif /* __LINUX_PCA9532_H */
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 61e0340a4b77..5884def15a24 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -207,5 +207,7 @@ struct gpio_led_platform_data {
207 unsigned long *delay_off); 207 unsigned long *delay_off);
208}; 208};
209 209
210struct platform_device *gpio_led_register_device(
211 int id, const struct gpio_led_platform_data *pdata);
210 212
211#endif /* __LINUX_LEDS_H_INCLUDED */ 213#endif /* __LINUX_LEDS_H_INCLUDED */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 4aef1dda6406..ef820a3c378b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -487,12 +487,15 @@ static inline void print_irqtrace_events(struct task_struct *curr)
487#ifdef CONFIG_DEBUG_LOCK_ALLOC 487#ifdef CONFIG_DEBUG_LOCK_ALLOC
488# ifdef CONFIG_PROVE_LOCKING 488# ifdef CONFIG_PROVE_LOCKING
489# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) 489# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
490# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
490# else 491# else
491# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) 492# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
493# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
492# endif 494# endif
493# define mutex_release(l, n, i) lock_release(l, n, i) 495# define mutex_release(l, n, i) lock_release(l, n, i)
494#else 496#else
495# define mutex_acquire(l, s, t, i) do { } while (0) 497# define mutex_acquire(l, s, t, i) do { } while (0)
498# define mutex_acquire_nest(l, s, t, n, i) do { } while (0)
496# define mutex_release(l, n, i) do { } while (0) 499# define mutex_release(l, n, i) do { } while (0)
497#endif 500#endif
498 501
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index 6a4fab7c6e09..7a71ffad037c 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -139,9 +139,9 @@ write intent log information, three of which are mentioned here.
139 * .list is on one of three lists: 139 * .list is on one of three lists:
140 * in_use: currently in use (refcnt > 0, lc_number != LC_FREE) 140 * in_use: currently in use (refcnt > 0, lc_number != LC_FREE)
141 * lru: unused but ready to be reused or recycled 141 * lru: unused but ready to be reused or recycled
142 * (ts_refcnt == 0, lc_number != LC_FREE), 142 * (lc_refcnt == 0, lc_number != LC_FREE),
143 * free: unused but ready to be recycled 143 * free: unused but ready to be recycled
144 * (ts_refcnt == 0, lc_number == LC_FREE), 144 * (lc_refcnt == 0, lc_number == LC_FREE),
145 * 145 *
146 * an element is said to be "in the active set", 146 * an element is said to be "in the active set",
147 * if either on "in_use" or "lru", i.e. lc_number != LC_FREE. 147 * if either on "in_use" or "lru", i.e. lc_number != LC_FREE.
@@ -160,8 +160,8 @@ struct lc_element {
160 struct hlist_node colision; 160 struct hlist_node colision;
161 struct list_head list; /* LRU list or free list */ 161 struct list_head list; /* LRU list or free list */
162 unsigned refcnt; 162 unsigned refcnt;
163 /* back "pointer" into ts_cache->element[index], 163 /* back "pointer" into lc_cache->element[index],
164 * for paranoia, and for "ts_element_to_index" */ 164 * for paranoia, and for "lc_element_to_index" */
165 unsigned lc_index; 165 unsigned lc_index;
166 /* if we want to track a larger set of objects, 166 /* if we want to track a larger set of objects,
167 * it needs to become arch independend u64 */ 167 * it needs to become arch independend u64 */
@@ -190,8 +190,8 @@ struct lru_cache {
190 /* Arbitrary limit on maximum tracked objects. Practical limit is much 190 /* Arbitrary limit on maximum tracked objects. Practical limit is much
191 * lower due to allocation failures, probably. For typical use cases, 191 * lower due to allocation failures, probably. For typical use cases,
192 * nr_elements should be a few thousand at most. 192 * nr_elements should be a few thousand at most.
193 * This also limits the maximum value of ts_element.ts_index, allowing the 193 * This also limits the maximum value of lc_element.lc_index, allowing the
194 * 8 high bits of .ts_index to be overloaded with flags in the future. */ 194 * 8 high bits of .lc_index to be overloaded with flags in the future. */
195#define LC_MAX_ACTIVE (1<<24) 195#define LC_MAX_ACTIVE (1<<24)
196 196
197 /* statistics */ 197 /* statistics */
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 62a10c2a11f2..7525e38c434d 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -2,6 +2,8 @@
2#define _LINUX_MEMBLOCK_H 2#define _LINUX_MEMBLOCK_H
3#ifdef __KERNEL__ 3#ifdef __KERNEL__
4 4
5#define MEMBLOCK_ERROR 0
6
5#ifdef CONFIG_HAVE_MEMBLOCK 7#ifdef CONFIG_HAVE_MEMBLOCK
6/* 8/*
7 * Logical memory blocks. 9 * Logical memory blocks.
@@ -20,7 +22,6 @@
20#include <asm/memblock.h> 22#include <asm/memblock.h>
21 23
22#define INIT_MEMBLOCK_REGIONS 128 24#define INIT_MEMBLOCK_REGIONS 128
23#define MEMBLOCK_ERROR 0
24 25
25struct memblock_region { 26struct memblock_region {
26 phys_addr_t base; 27 phys_addr_t base;
@@ -160,6 +161,12 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
160#define __initdata_memblock 161#define __initdata_memblock
161#endif 162#endif
162 163
164#else
165static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
166{
167 return MEMBLOCK_ERROR;
168}
169
163#endif /* CONFIG_HAVE_MEMBLOCK */ 170#endif /* CONFIG_HAVE_MEMBLOCK */
164 171
165#endif /* __KERNEL__ */ 172#endif /* __KERNEL__ */
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 31ac26ca4acf..7978eec1b7d9 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -199,6 +199,9 @@ void mpol_free_shared_policy(struct shared_policy *p);
199struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, 199struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
200 unsigned long idx); 200 unsigned long idx);
201 201
202struct mempolicy *get_vma_policy(struct task_struct *tsk,
203 struct vm_area_struct *vma, unsigned long addr);
204
202extern void numa_default_policy(void); 205extern void numa_default_policy(void);
203extern void numa_policy_init(void); 206extern void numa_policy_init(void);
204extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new, 207extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
@@ -228,10 +231,10 @@ int do_migrate_pages(struct mm_struct *mm,
228 231
229#ifdef CONFIG_TMPFS 232#ifdef CONFIG_TMPFS
230extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context); 233extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);
234#endif
231 235
232extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, 236extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
233 int no_context); 237 int no_context);
234#endif
235 238
236/* Check if a vma is migratable */ 239/* Check if a vma is migratable */
237static inline int vma_migratable(struct vm_area_struct *vma) 240static inline int vma_migratable(struct vm_area_struct *vma)
@@ -368,13 +371,13 @@ static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
368{ 371{
369 return 1; /* error */ 372 return 1; /* error */
370} 373}
374#endif
371 375
372static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, 376static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
373 int no_context) 377 int no_context)
374{ 378{
375 return 0; 379 return 0;
376} 380}
377#endif
378 381
379#endif /* CONFIG_NUMA */ 382#endif /* CONFIG_NUMA */
380#endif /* __KERNEL__ */ 383#endif /* __KERNEL__ */
diff --git a/include/linux/mfd/db5500-prcmu.h b/include/linux/mfd/db5500-prcmu.h
new file mode 100644
index 000000000000..f0977986402c
--- /dev/null
+++ b/include/linux/mfd/db5500-prcmu.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License Terms: GNU General Public License v2
5 *
6 * U5500 PRCMU API.
7 */
8#ifndef __MACH_PRCMU_U5500_H
9#define __MACH_PRCMU_U5500_H
10
11#ifdef CONFIG_UX500_SOC_DB5500
12
13void db5500_prcmu_early_init(void);
14
15int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
16int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size);
17
18#else /* !CONFIG_UX500_SOC_DB5500 */
19
20static inline void db5500_prcmu_early_init(void)
21{
22}
23
24static inline int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
25{
26 return -ENOSYS;
27}
28
29static inline int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
30{
31 return -ENOSYS;
32}
33
34#endif /* CONFIG_UX500_SOC_DB5500 */
35
36static inline int db5500_prcmu_config_abb_event_readout(u32 abb_events)
37{
38#ifdef CONFIG_MACH_U5500_SIMULATOR
39 return 0;
40#else
41 return -1;
42#endif
43}
44
45#endif /* __MACH_PRCMU_U5500_H */
diff --git a/include/linux/mfd/db8500-prcmu.h b/include/linux/mfd/db8500-prcmu.h
new file mode 100644
index 000000000000..917dbcab701c
--- /dev/null
+++ b/include/linux/mfd/db8500-prcmu.h
@@ -0,0 +1,978 @@
1/*
2 * Copyright (C) STMicroelectronics 2009
3 * Copyright (C) ST-Ericsson SA 2010
4 *
5 * License Terms: GNU General Public License v2
6 * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
7 *
8 * PRCMU f/w APIs
9 */
10#ifndef __MFD_DB8500_PRCMU_H
11#define __MFD_DB8500_PRCMU_H
12
13#include <linux/interrupt.h>
14#include <linux/notifier.h>
15
16/* This portion previously known as <mach/prcmu-fw-defs_v1.h> */
17
18/**
19 * enum state - ON/OFF state definition
20 * @OFF: State is ON
21 * @ON: State is OFF
22 *
23 */
24enum state {
25 OFF = 0x0,
26 ON = 0x1,
27};
28
29/**
30 * enum ret_state - general purpose On/Off/Retention states
31 *
32 */
33enum ret_state {
34 OFFST = 0,
35 ONST = 1,
36 RETST = 2
37};
38
39/**
40 * enum clk_arm - ARM Cortex A9 clock schemes
41 * @A9_OFF:
42 * @A9_BOOT:
43 * @A9_OPPT1:
44 * @A9_OPPT2:
45 * @A9_EXTCLK:
46 */
47enum clk_arm {
48 A9_OFF,
49 A9_BOOT,
50 A9_OPPT1,
51 A9_OPPT2,
52 A9_EXTCLK
53};
54
55/**
56 * enum clk_gen - GEN#0/GEN#1 clock schemes
57 * @GEN_OFF:
58 * @GEN_BOOT:
59 * @GEN_OPPT1:
60 */
61enum clk_gen {
62 GEN_OFF,
63 GEN_BOOT,
64 GEN_OPPT1,
65};
66
67/* some information between arm and xp70 */
68
69/**
70 * enum romcode_write - Romcode message written by A9 AND read by XP70
71 * @RDY_2_DS: Value set when ApDeepSleep state can be executed by XP70
72 * @RDY_2_XP70_RST: Value set when 0x0F has been successfully polled by the
73 * romcode. The xp70 will go into self-reset
74 */
75enum romcode_write {
76 RDY_2_DS = 0x09,
77 RDY_2_XP70_RST = 0x10
78};
79
80/**
81 * enum romcode_read - Romcode message written by XP70 and read by A9
82 * @INIT: Init value when romcode field is not used
83 * @FS_2_DS: Value set when power state is going from ApExecute to
84 * ApDeepSleep
85 * @END_DS: Value set when ApDeepSleep power state is reached coming from
86 * ApExecute state
87 * @DS_TO_FS: Value set when power state is going from ApDeepSleep to
88 * ApExecute
89 * @END_FS: Value set when ApExecute power state is reached coming from
90 * ApDeepSleep state
91 * @SWR: Value set when power state is going to ApReset
92 * @END_SWR: Value set when the xp70 finished executing ApReset actions and
93 * waits for romcode acknowledgment to go to self-reset
94 */
95enum romcode_read {
96 INIT = 0x00,
97 FS_2_DS = 0x0A,
98 END_DS = 0x0B,
99 DS_TO_FS = 0x0C,
100 END_FS = 0x0D,
101 SWR = 0x0E,
102 END_SWR = 0x0F
103};
104
105/**
106 * enum ap_pwrst - current power states defined in PRCMU firmware
107 * @NO_PWRST: Current power state init
108 * @AP_BOOT: Current power state is apBoot
109 * @AP_EXECUTE: Current power state is apExecute
110 * @AP_DEEP_SLEEP: Current power state is apDeepSleep
111 * @AP_SLEEP: Current power state is apSleep
112 * @AP_IDLE: Current power state is apIdle
113 * @AP_RESET: Current power state is apReset
114 */
115enum ap_pwrst {
116 NO_PWRST = 0x00,
117 AP_BOOT = 0x01,
118 AP_EXECUTE = 0x02,
119 AP_DEEP_SLEEP = 0x03,
120 AP_SLEEP = 0x04,
121 AP_IDLE = 0x05,
122 AP_RESET = 0x06
123};
124
125/**
126 * enum ap_pwrst_trans - Transition states defined in PRCMU firmware
127 * @NO_TRANSITION: No power state transition
128 * @APEXECUTE_TO_APSLEEP: Power state transition from ApExecute to ApSleep
129 * @APIDLE_TO_APSLEEP: Power state transition from ApIdle to ApSleep
130 * @APBOOT_TO_APEXECUTE: Power state transition from ApBoot to ApExecute
131 * @APEXECUTE_TO_APDEEPSLEEP: Power state transition from ApExecute to
132 * ApDeepSleep
133 * @APEXECUTE_TO_APIDLE: Power state transition from ApExecute to ApIdle
134 */
135enum ap_pwrst_trans {
136 NO_TRANSITION = 0x00,
137 APEXECUTE_TO_APSLEEP = 0x01,
138 APIDLE_TO_APSLEEP = 0x02, /* To be removed */
139 PRCMU_AP_SLEEP = 0x01,
140 APBOOT_TO_APEXECUTE = 0x03,
141 APEXECUTE_TO_APDEEPSLEEP = 0x04, /* To be removed */
142 PRCMU_AP_DEEP_SLEEP = 0x04,
143 APEXECUTE_TO_APIDLE = 0x05, /* To be removed */
144 PRCMU_AP_IDLE = 0x05,
145 PRCMU_AP_DEEP_IDLE = 0x07,
146};
147
148/**
149 * enum ddr_pwrst - DDR power states definition
150 * @DDR_PWR_STATE_UNCHANGED: SDRAM and DDR controller state is unchanged
151 * @DDR_PWR_STATE_ON:
152 * @DDR_PWR_STATE_OFFLOWLAT:
153 * @DDR_PWR_STATE_OFFHIGHLAT:
154 */
155enum ddr_pwrst {
156 DDR_PWR_STATE_UNCHANGED = 0x00,
157 DDR_PWR_STATE_ON = 0x01,
158 DDR_PWR_STATE_OFFLOWLAT = 0x02,
159 DDR_PWR_STATE_OFFHIGHLAT = 0x03
160};
161
162/**
163 * enum arm_opp - ARM OPP states definition
164 * @ARM_OPP_INIT:
165 * @ARM_NO_CHANGE: The ARM operating point is unchanged
166 * @ARM_100_OPP: The new ARM operating point is arm100opp
167 * @ARM_50_OPP: The new ARM operating point is arm50opp
168 * @ARM_MAX_OPP: Operating point is "max" (more than 100)
169 * @ARM_MAX_FREQ100OPP: Set max opp if available, else 100
170 * @ARM_EXTCLK: The new ARM operating point is armExtClk
171 */
172enum arm_opp {
173 ARM_OPP_INIT = 0x00,
174 ARM_NO_CHANGE = 0x01,
175 ARM_100_OPP = 0x02,
176 ARM_50_OPP = 0x03,
177 ARM_MAX_OPP = 0x04,
178 ARM_MAX_FREQ100OPP = 0x05,
179 ARM_EXTCLK = 0x07
180};
181
182/**
183 * enum ape_opp - APE OPP states definition
184 * @APE_OPP_INIT:
185 * @APE_NO_CHANGE: The APE operating point is unchanged
186 * @APE_100_OPP: The new APE operating point is ape100opp
187 * @APE_50_OPP: 50%
188 */
189enum ape_opp {
190 APE_OPP_INIT = 0x00,
191 APE_NO_CHANGE = 0x01,
192 APE_100_OPP = 0x02,
193 APE_50_OPP = 0x03
194};
195
196/**
197 * enum hw_acc_state - State definition for hardware accelerator
198 * @HW_NO_CHANGE: The hardware accelerator state must remain unchanged
199 * @HW_OFF: The hardware accelerator must be switched off
200 * @HW_OFF_RAMRET: The hardware accelerator must be switched off with its
201 * internal RAM in retention
202 * @HW_ON: The hwa hardware accelerator hwa must be switched on
203 *
204 * NOTE! Deprecated, to be removed when all users switched over to use the
205 * regulator API.
206 */
207enum hw_acc_state {
208 HW_NO_CHANGE = 0x00,
209 HW_OFF = 0x01,
210 HW_OFF_RAMRET = 0x02,
211 HW_ON = 0x04
212};
213
214/**
215 * enum mbox_2_arm_stat - Status messages definition for mbox_arm
216 * @BOOT_TO_EXECUTEOK: The apBoot to apExecute state transition has been
217 * completed
218 * @DEEPSLEEPOK: The apExecute to apDeepSleep state transition has been
219 * completed
220 * @SLEEPOK: The apExecute to apSleep state transition has been completed
221 * @IDLEOK: The apExecute to apIdle state transition has been completed
222 * @SOFTRESETOK: The A9 watchdog/ SoftReset state has been completed
223 * @SOFTRESETGO : The A9 watchdog/SoftReset state is on going
224 * @BOOT_TO_EXECUTE: The apBoot to apExecute state transition is on going
225 * @EXECUTE_TO_DEEPSLEEP: The apExecute to apDeepSleep state transition is on
226 * going
227 * @DEEPSLEEP_TO_EXECUTE: The apDeepSleep to apExecute state transition is on
228 * going
229 * @DEEPSLEEP_TO_EXECUTEOK: The apDeepSleep to apExecute state transition has
230 * been completed
231 * @EXECUTE_TO_SLEEP: The apExecute to apSleep state transition is on going
232 * @SLEEP_TO_EXECUTE: The apSleep to apExecute state transition is on going
233 * @SLEEP_TO_EXECUTEOK: The apSleep to apExecute state transition has been
234 * completed
235 * @EXECUTE_TO_IDLE: The apExecute to apIdle state transition is on going
236 * @IDLE_TO_EXECUTE: The apIdle to apExecute state transition is on going
237 * @IDLE_TO_EXECUTEOK: The apIdle to apExecute state transition has been
238 * completed
239 * @INIT_STATUS: Status init
240 */
241enum ap_pwrsttr_status {
242 BOOT_TO_EXECUTEOK = 0xFF,
243 DEEPSLEEPOK = 0xFE,
244 SLEEPOK = 0xFD,
245 IDLEOK = 0xFC,
246 SOFTRESETOK = 0xFB,
247 SOFTRESETGO = 0xFA,
248 BOOT_TO_EXECUTE = 0xF9,
249 EXECUTE_TO_DEEPSLEEP = 0xF8,
250 DEEPSLEEP_TO_EXECUTE = 0xF7,
251 DEEPSLEEP_TO_EXECUTEOK = 0xF6,
252 EXECUTE_TO_SLEEP = 0xF5,
253 SLEEP_TO_EXECUTE = 0xF4,
254 SLEEP_TO_EXECUTEOK = 0xF3,
255 EXECUTE_TO_IDLE = 0xF2,
256 IDLE_TO_EXECUTE = 0xF1,
257 IDLE_TO_EXECUTEOK = 0xF0,
258 RDYTODS_RETURNTOEXE = 0xEF,
259 NORDYTODS_RETURNTOEXE = 0xEE,
260 EXETOSLEEP_RETURNTOEXE = 0xED,
261 EXETOIDLE_RETURNTOEXE = 0xEC,
262 INIT_STATUS = 0xEB,
263
264 /*error messages */
265 INITERROR = 0x00,
266 PLLARMLOCKP_ER = 0x01,
267 PLLDDRLOCKP_ER = 0x02,
268 PLLSOCLOCKP_ER = 0x03,
269 PLLSOCK1LOCKP_ER = 0x04,
270 ARMWFI_ER = 0x05,
271 SYSCLKOK_ER = 0x06,
272 I2C_NACK_DATA_ER = 0x07,
273 BOOT_ER = 0x08,
274 I2C_STATUS_ALWAYS_1 = 0x0A,
275 I2C_NACK_REG_ADDR_ER = 0x0B,
276 I2C_NACK_DATA0123_ER = 0x1B,
277 I2C_NACK_ADDR_ER = 0x1F,
278 CURAPPWRSTISNOT_BOOT = 0x20,
279 CURAPPWRSTISNOT_EXECUTE = 0x21,
280 CURAPPWRSTISNOT_SLEEPMODE = 0x22,
281 CURAPPWRSTISNOT_CORRECTFORIT10 = 0x23,
282 FIFO4500WUISNOT_WUPEVENT = 0x24,
283 PLL32KLOCKP_ER = 0x29,
284 DDRDEEPSLEEPOK_ER = 0x2A,
285 ROMCODEREADY_ER = 0x50,
286 WUPBEFOREDS = 0x51,
287 DDRCONFIG_ER = 0x52,
288 WUPBEFORESLEEP = 0x53,
289 WUPBEFOREIDLE = 0x54
290}; /* earlier called as mbox_2_arm_stat */
291
292/**
293 * enum dvfs_stat - DVFS status messages definition
294 * @DVFS_GO: A state transition DVFS is on going
295 * @DVFS_ARM100OPPOK: The state transition DVFS has been completed for 100OPP
296 * @DVFS_ARM50OPPOK: The state transition DVFS has been completed for 50OPP
297 * @DVFS_ARMEXTCLKOK: The state transition DVFS has been completed for EXTCLK
298 * @DVFS_NOCHGTCLKOK: The state transition DVFS has been completed for
299 * NOCHGCLK
300 * @DVFS_INITSTATUS: Value init
301 */
302enum dvfs_stat {
303 DVFS_GO = 0xFF,
304 DVFS_ARM100OPPOK = 0xFE,
305 DVFS_ARM50OPPOK = 0xFD,
306 DVFS_ARMEXTCLKOK = 0xFC,
307 DVFS_NOCHGTCLKOK = 0xFB,
308 DVFS_INITSTATUS = 0x00
309};
310
311/**
312 * enum sva_mmdsp_stat - SVA MMDSP status messages
313 * @SVA_MMDSP_GO: SVAMMDSP interrupt has happened
314 * @SVA_MMDSP_INIT: Status init
315 */
316enum sva_mmdsp_stat {
317 SVA_MMDSP_GO = 0xFF,
318 SVA_MMDSP_INIT = 0x00
319};
320
321/**
322 * enum sia_mmdsp_stat - SIA MMDSP status messages
323 * @SIA_MMDSP_GO: SIAMMDSP interrupt has happened
324 * @SIA_MMDSP_INIT: Status init
325 */
326enum sia_mmdsp_stat {
327 SIA_MMDSP_GO = 0xFF,
328 SIA_MMDSP_INIT = 0x00
329};
330
331/**
332 * enum mbox_to_arm_err - Error messages definition
333 * @INIT_ERR: Init value
334 * @PLLARMLOCKP_ERR: PLLARM has not been correctly locked in given time
335 * @PLLDDRLOCKP_ERR: PLLDDR has not been correctly locked in the given time
336 * @PLLSOC0LOCKP_ERR: PLLSOC0 has not been correctly locked in the given time
337 * @PLLSOC1LOCKP_ERR: PLLSOC1 has not been correctly locked in the given time
338 * @ARMWFI_ERR: The ARM WFI has not been correctly executed in the given time
339 * @SYSCLKOK_ERR: The SYSCLK is not available in the given time
340 * @BOOT_ERR: Romcode has not validated the XP70 self reset in the given time
341 * @ROMCODESAVECONTEXT: The Romcode didn.t correctly save it secure context
342 * @VARMHIGHSPEEDVALTO_ERR: The ARM high speed supply value transfered
343 * through I2C has not been correctly executed in the given time
344 * @VARMHIGHSPEEDACCESS_ERR: The command value of VarmHighSpeedVal transfered
345 * through I2C has not been correctly executed in the given time
346 * @VARMLOWSPEEDVALTO_ERR:The ARM low speed supply value transfered through
347 * I2C has not been correctly executed in the given time
348 * @VARMLOWSPEEDACCESS_ERR: The command value of VarmLowSpeedVal transfered
349 * through I2C has not been correctly executed in the given time
350 * @VARMRETENTIONVALTO_ERR: The ARM retention supply value transfered through
351 * I2C has not been correctly executed in the given time
352 * @VARMRETENTIONACCESS_ERR: The command value of VarmRetentionVal transfered
353 * through I2C has not been correctly executed in the given time
354 * @VAPEHIGHSPEEDVALTO_ERR: The APE highspeed supply value transfered through
355 * I2C has not been correctly executed in the given time
356 * @VSAFEHPVALTO_ERR: The SAFE high power supply value transfered through I2C
357 * has not been correctly executed in the given time
358 * @VMODSEL1VALTO_ERR: The MODEM sel1 supply value transfered through I2C has
359 * not been correctly executed in the given time
360 * @VMODSEL2VALTO_ERR: The MODEM sel2 supply value transfered through I2C has
361 * not been correctly executed in the given time
362 * @VARMOFFACCESS_ERR: The command value of Varm ON/OFF transfered through
363 * I2C has not been correctly executed in the given time
364 * @VAPEOFFACCESS_ERR: The command value of Vape ON/OFF transfered through
365 * I2C has not been correctly executed in the given time
366 * @VARMRETACCES_ERR: The command value of Varm retention ON/OFF transfered
367 * through I2C has not been correctly executed in the given time
368 * @CURAPPWRSTISNOTBOOT:Generated when Arm want to do power state transition
369 * ApBoot to ApExecute but the power current state is not Apboot
370 * @CURAPPWRSTISNOTEXECUTE: Generated when Arm want to do power state
371 * transition from ApExecute to others power state but the
372 * power current state is not ApExecute
373 * @CURAPPWRSTISNOTSLEEPMODE: Generated when wake up events are transmitted
374 * but the power current state is not ApDeepSleep/ApSleep/ApIdle
375 * @CURAPPWRSTISNOTCORRECTDBG: Generated when wake up events are transmitted
376 * but the power current state is not correct
377 * @ARMREGU1VALTO_ERR:The ArmRegu1 value transferred through I2C has not
378 * been correctly executed in the given time
379 * @ARMREGU2VALTO_ERR: The ArmRegu2 value transferred through I2C has not
380 * been correctly executed in the given time
381 * @VAPEREGUVALTO_ERR: The VApeRegu value transfered through I2C has not
382 * been correctly executed in the given time
383 * @VSMPS3REGUVALTO_ERR: The VSmps3Regu value transfered through I2C has not
384 * been correctly executed in the given time
385 * @VMODREGUVALTO_ERR: The VModemRegu value transfered through I2C has not
386 * been correctly executed in the given time
387 */
388enum mbox_to_arm_err {
389 INIT_ERR = 0x00,
390 PLLARMLOCKP_ERR = 0x01,
391 PLLDDRLOCKP_ERR = 0x02,
392 PLLSOC0LOCKP_ERR = 0x03,
393 PLLSOC1LOCKP_ERR = 0x04,
394 ARMWFI_ERR = 0x05,
395 SYSCLKOK_ERR = 0x06,
396 BOOT_ERR = 0x07,
397 ROMCODESAVECONTEXT = 0x08,
398 VARMHIGHSPEEDVALTO_ERR = 0x10,
399 VARMHIGHSPEEDACCESS_ERR = 0x11,
400 VARMLOWSPEEDVALTO_ERR = 0x12,
401 VARMLOWSPEEDACCESS_ERR = 0x13,
402 VARMRETENTIONVALTO_ERR = 0x14,
403 VARMRETENTIONACCESS_ERR = 0x15,
404 VAPEHIGHSPEEDVALTO_ERR = 0x16,
405 VSAFEHPVALTO_ERR = 0x17,
406 VMODSEL1VALTO_ERR = 0x18,
407 VMODSEL2VALTO_ERR = 0x19,
408 VARMOFFACCESS_ERR = 0x1A,
409 VAPEOFFACCESS_ERR = 0x1B,
410 VARMRETACCES_ERR = 0x1C,
411 CURAPPWRSTISNOTBOOT = 0x20,
412 CURAPPWRSTISNOTEXECUTE = 0x21,
413 CURAPPWRSTISNOTSLEEPMODE = 0x22,
414 CURAPPWRSTISNOTCORRECTDBG = 0x23,
415 ARMREGU1VALTO_ERR = 0x24,
416 ARMREGU2VALTO_ERR = 0x25,
417 VAPEREGUVALTO_ERR = 0x26,
418 VSMPS3REGUVALTO_ERR = 0x27,
419 VMODREGUVALTO_ERR = 0x28
420};
421
422enum hw_acc {
423 SVAMMDSP = 0,
424 SVAPIPE = 1,
425 SIAMMDSP = 2,
426 SIAPIPE = 3,
427 SGA = 4,
428 B2R2MCDE = 5,
429 ESRAM12 = 6,
430 ESRAM34 = 7,
431};
432
433enum cs_pwrmgt {
434 PWRDNCS0 = 0,
435 WKUPCS0 = 1,
436 PWRDNCS1 = 2,
437 WKUPCS1 = 3
438};
439
440/* Defs related to autonomous power management */
441
442/**
443 * enum sia_sva_pwr_policy - Power policy
444 * @NO_CHGT: No change
445 * @DSPOFF_HWPOFF:
446 * @DSPOFFRAMRET_HWPOFF:
447 * @DSPCLKOFF_HWPOFF:
448 * @DSPCLKOFF_HWPCLKOFF:
449 *
450 */
451enum sia_sva_pwr_policy {
452 NO_CHGT = 0x0,
453 DSPOFF_HWPOFF = 0x1,
454 DSPOFFRAMRET_HWPOFF = 0x2,
455 DSPCLKOFF_HWPOFF = 0x3,
456 DSPCLKOFF_HWPCLKOFF = 0x4,
457};
458
459/**
460 * enum auto_enable - Auto Power enable
461 * @AUTO_OFF:
462 * @AUTO_ON:
463 *
464 */
465enum auto_enable {
466 AUTO_OFF = 0x0,
467 AUTO_ON = 0x1,
468};
469
470/* End of file previously known as prcmu-fw-defs_v1.h */
471
472/* PRCMU Wakeup defines */
473enum prcmu_wakeup_index {
474 PRCMU_WAKEUP_INDEX_RTC,
475 PRCMU_WAKEUP_INDEX_RTT0,
476 PRCMU_WAKEUP_INDEX_RTT1,
477 PRCMU_WAKEUP_INDEX_HSI0,
478 PRCMU_WAKEUP_INDEX_HSI1,
479 PRCMU_WAKEUP_INDEX_USB,
480 PRCMU_WAKEUP_INDEX_ABB,
481 PRCMU_WAKEUP_INDEX_ABB_FIFO,
482 PRCMU_WAKEUP_INDEX_ARM,
483 NUM_PRCMU_WAKEUP_INDICES
484};
485#define PRCMU_WAKEUP(_name) (BIT(PRCMU_WAKEUP_INDEX_##_name))
486
487/* PRCMU QoS APE OPP class */
488#define PRCMU_QOS_APE_OPP 1
489#define PRCMU_QOS_DDR_OPP 2
490#define PRCMU_QOS_DEFAULT_VALUE -1
491
492/**
493 * enum hw_acc_dev - enum for hw accelerators
494 * @HW_ACC_SVAMMDSP: for SVAMMDSP
495 * @HW_ACC_SVAPIPE: for SVAPIPE
496 * @HW_ACC_SIAMMDSP: for SIAMMDSP
497 * @HW_ACC_SIAPIPE: for SIAPIPE
498 * @HW_ACC_SGA: for SGA
499 * @HW_ACC_B2R2: for B2R2
500 * @HW_ACC_MCDE: for MCDE
501 * @HW_ACC_ESRAM1: for ESRAM1
502 * @HW_ACC_ESRAM2: for ESRAM2
503 * @HW_ACC_ESRAM3: for ESRAM3
504 * @HW_ACC_ESRAM4: for ESRAM4
505 * @NUM_HW_ACC: number of hardware accelerators
506 *
507 * Different hw accelerators which can be turned ON/
508 * OFF or put into retention (MMDSPs and ESRAMs).
509 * Used with EPOD API.
510 *
511 * NOTE! Deprecated, to be removed when all users switched over to use the
512 * regulator API.
513 */
514enum hw_acc_dev {
515 HW_ACC_SVAMMDSP,
516 HW_ACC_SVAPIPE,
517 HW_ACC_SIAMMDSP,
518 HW_ACC_SIAPIPE,
519 HW_ACC_SGA,
520 HW_ACC_B2R2,
521 HW_ACC_MCDE,
522 HW_ACC_ESRAM1,
523 HW_ACC_ESRAM2,
524 HW_ACC_ESRAM3,
525 HW_ACC_ESRAM4,
526 NUM_HW_ACC
527};
528
529/*
530 * Ids for all EPODs (power domains)
531 * - EPOD_ID_SVAMMDSP: power domain for SVA MMDSP
532 * - EPOD_ID_SVAPIPE: power domain for SVA pipe
533 * - EPOD_ID_SIAMMDSP: power domain for SIA MMDSP
534 * - EPOD_ID_SIAPIPE: power domain for SIA pipe
535 * - EPOD_ID_SGA: power domain for SGA
536 * - EPOD_ID_B2R2_MCDE: power domain for B2R2 and MCDE
537 * - EPOD_ID_ESRAM12: power domain for ESRAM 1 and 2
538 * - EPOD_ID_ESRAM34: power domain for ESRAM 3 and 4
539 * - NUM_EPOD_ID: number of power domains
540 */
541#define EPOD_ID_SVAMMDSP 0
542#define EPOD_ID_SVAPIPE 1
543#define EPOD_ID_SIAMMDSP 2
544#define EPOD_ID_SIAPIPE 3
545#define EPOD_ID_SGA 4
546#define EPOD_ID_B2R2_MCDE 5
547#define EPOD_ID_ESRAM12 6
548#define EPOD_ID_ESRAM34 7
549#define NUM_EPOD_ID 8
550
/*
 * state definition for EPOD (power domain)
 * - EPOD_STATE_NO_CHANGE: The EPOD should remain unchanged
 * - EPOD_STATE_OFF: The EPOD is switched off
 * - EPOD_STATE_RAMRET: The EPOD is switched off with its internal RAM in
 *   retention
 * - EPOD_STATE_ON_CLK_OFF: The EPOD is switched on, clock is still off
 * - EPOD_STATE_ON: Same as above, but with clock enabled
 */
#define EPOD_STATE_NO_CHANGE	0x00
#define EPOD_STATE_OFF		0x01
#define EPOD_STATE_RAMRET	0x02
#define EPOD_STATE_ON_CLK_OFF	0x03
#define EPOD_STATE_ON		0x04
565
/*
 * CLKOUT sources
 */
#define PRCMU_CLKSRC_CLK38M		0x00
#define PRCMU_CLKSRC_ACLK		0x01
#define PRCMU_CLKSRC_SYSCLK		0x02
#define PRCMU_CLKSRC_LCDCLK		0x03
#define PRCMU_CLKSRC_SDMMCCLK		0x04
#define PRCMU_CLKSRC_TVCLK		0x05
#define PRCMU_CLKSRC_TIMCLK		0x06
#define PRCMU_CLKSRC_CLK009		0x07
/* These are only valid for CLKOUT1: */
#define PRCMU_CLKSRC_SIAMMDSPCLK	0x40
#define PRCMU_CLKSRC_I2CCLK		0x41
#define PRCMU_CLKSRC_MSP02CLK		0x42
#define PRCMU_CLKSRC_ARMPLL_OBSCLK	0x43
#define PRCMU_CLKSRC_HSIRXCLK		0x44
#define PRCMU_CLKSRC_HSITXCLK		0x45
#define PRCMU_CLKSRC_ARMCLKFIX		0x46
#define PRCMU_CLKSRC_HDMICLK		0x47
586
/*
 * Definitions for autonomous power management configuration.
 */

#define PRCMU_AUTO_PM_OFF	0
#define PRCMU_AUTO_PM_ON	1

#define PRCMU_AUTO_PM_POWER_ON_HSEM		BIT(0)
#define PRCMU_AUTO_PM_POWER_ON_ABB_FIFO_IT	BIT(1)

/**
 * enum prcmu_auto_pm_policy - autonomous power management policies
 * @PRCMU_AUTO_PM_POLICY_NO_CHANGE: keep the currently configured policy
 * @PRCMU_AUTO_PM_POLICY_DSP_OFF_HWP_OFF: DSP off, HWP off
 * @PRCMU_AUTO_PM_POLICY_DSP_OFF_RAMRET_HWP_OFF: DSP off with RAM retention,
 *	HWP off
 * @PRCMU_AUTO_PM_POLICY_DSP_CLK_OFF_HWP_OFF: DSP clock off, HWP off
 * @PRCMU_AUTO_PM_POLICY_DSP_CLK_OFF_HWP_CLK_OFF: DSP clock off, HWP clock off
 *
 * Used as the sia_policy/sva_policy members of struct prcmu_auto_pm_config.
 */
enum prcmu_auto_pm_policy {
	PRCMU_AUTO_PM_POLICY_NO_CHANGE,
	PRCMU_AUTO_PM_POLICY_DSP_OFF_HWP_OFF,
	PRCMU_AUTO_PM_POLICY_DSP_OFF_RAMRET_HWP_OFF,
	PRCMU_AUTO_PM_POLICY_DSP_CLK_OFF_HWP_OFF,
	PRCMU_AUTO_PM_POLICY_DSP_CLK_OFF_HWP_CLK_OFF,
};
604
605/**
606 * struct prcmu_auto_pm_config - Autonomous power management configuration.
607 * @sia_auto_pm_enable: SIA autonomous pm enable. (PRCMU_AUTO_PM_{OFF,ON})
608 * @sia_power_on: SIA power ON enable. (PRCMU_AUTO_PM_POWER_ON_* bitmask)
609 * @sia_policy: SIA power policy. (enum prcmu_auto_pm_policy)
610 * @sva_auto_pm_enable: SVA autonomous pm enable. (PRCMU_AUTO_PM_{OFF,ON})
611 * @sva_power_on: SVA power ON enable. (PRCMU_AUTO_PM_POWER_ON_* bitmask)
612 * @sva_policy: SVA power policy. (enum prcmu_auto_pm_policy)
613 */
614struct prcmu_auto_pm_config {
615 u8 sia_auto_pm_enable;
616 u8 sia_power_on;
617 u8 sia_policy;
618 u8 sva_auto_pm_enable;
619 u8 sva_power_on;
620 u8 sva_policy;
621};
622
/**
 * enum ddr_opp - DDR OPP states definition
 * @DDR_100_OPP: The new DDR operating point is ddr100opp
 * @DDR_50_OPP: The new DDR operating point is ddr50opp
 * @DDR_25_OPP: The new DDR operating point is ddr25opp
 */
enum ddr_opp {
	DDR_100_OPP = 0x00,
	DDR_50_OPP = 0x01,
	DDR_25_OPP = 0x02,
};
634
/*
 * Clock identifiers.
 *
 * Everything before PRCMU_NUM_REG_CLOCKS is a register-controlled clock;
 * PRCMU_SYSCLK and PRCMU_TIMCLK deliberately share/extend that range.
 */
enum prcmu_clock {
	PRCMU_SGACLK,
	PRCMU_UARTCLK,
	PRCMU_MSP02CLK,
	PRCMU_MSP1CLK,
	PRCMU_I2CCLK,
	PRCMU_SDMMCCLK,
	PRCMU_SLIMCLK,
	PRCMU_PER1CLK,
	PRCMU_PER2CLK,
	PRCMU_PER3CLK,
	PRCMU_PER5CLK,
	PRCMU_PER6CLK,
	PRCMU_PER7CLK,
	PRCMU_LCDCLK,
	PRCMU_BMLCLK,
	PRCMU_HSITXCLK,
	PRCMU_HSIRXCLK,
	PRCMU_HDMICLK,
	PRCMU_APEATCLK,
	PRCMU_APETRACECLK,
	PRCMU_MCDECLK,
	PRCMU_IPI2CCLK,
	PRCMU_DSIALTCLK,
	PRCMU_DMACLK,
	PRCMU_B2R2CLK,
	PRCMU_TVCLK,
	PRCMU_SSPCLK,
	PRCMU_RNGCLK,
	PRCMU_UICCCLK,
	PRCMU_NUM_REG_CLOCKS,	/* sentinel: count of register clocks */
	PRCMU_SYSCLK = PRCMU_NUM_REG_CLOCKS,
	PRCMU_TIMCLK,
};
672
673/*
674 * Definitions for controlling ESRAM0 in deep sleep.
675 */
676#define ESRAM0_DEEP_SLEEP_STATE_OFF 1
677#define ESRAM0_DEEP_SLEEP_STATE_RET 2
678
679#ifdef CONFIG_MFD_DB8500_PRCMU
680void __init prcmu_early_init(void);
681int prcmu_set_display_clocks(void);
682int prcmu_disable_dsipll(void);
683int prcmu_enable_dsipll(void);
684#else
685static inline void __init prcmu_early_init(void) {}
686#endif
687
688#ifdef CONFIG_MFD_DB8500_PRCMU
689
690int prcmu_set_rc_a2p(enum romcode_write);
691enum romcode_read prcmu_get_rc_p2a(void);
692enum ap_pwrst prcmu_get_xp70_current_state(void);
693int prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll);
694
695void prcmu_enable_wakeups(u32 wakeups);
696static inline void prcmu_disable_wakeups(void)
697{
698 prcmu_enable_wakeups(0);
699}
700
701void prcmu_config_abb_event_readout(u32 abb_events);
702void prcmu_get_abb_event_buffer(void __iomem **buf);
703int prcmu_set_arm_opp(u8 opp);
704int prcmu_get_arm_opp(void);
705bool prcmu_has_arm_maxopp(void);
706bool prcmu_is_u8400(void);
707int prcmu_set_ape_opp(u8 opp);
708int prcmu_get_ape_opp(void);
709int prcmu_request_ape_opp_100_voltage(bool enable);
710int prcmu_release_usb_wakeup_state(void);
711int prcmu_set_ddr_opp(u8 opp);
712int prcmu_get_ddr_opp(void);
713unsigned long prcmu_qos_get_cpufreq_opp_delay(void);
714void prcmu_qos_set_cpufreq_opp_delay(unsigned long);
715/* NOTE! Use regulator framework instead */
716int prcmu_set_hwacc(u16 hw_acc_dev, u8 state);
717int prcmu_set_epod(u16 epod_id, u8 epod_state);
718void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep,
719 struct prcmu_auto_pm_config *idle);
720bool prcmu_is_auto_pm_enabled(void);
721
722int prcmu_config_clkout(u8 clkout, u8 source, u8 div);
723int prcmu_request_clock(u8 clock, bool enable);
724int prcmu_set_clock_divider(u8 clock, u8 divider);
725int prcmu_config_esram0_deep_sleep(u8 state);
726int prcmu_config_hotdog(u8 threshold);
727int prcmu_config_hotmon(u8 low, u8 high);
728int prcmu_start_temp_sense(u16 cycles32k);
729int prcmu_stop_temp_sense(void);
730int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
731int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size);
732
733void prcmu_ac_wake_req(void);
734void prcmu_ac_sleep_req(void);
735void prcmu_system_reset(u16 reset_code);
736void prcmu_modem_reset(void);
737bool prcmu_is_ac_wake_requested(void);
738void prcmu_enable_spi2(void);
739void prcmu_disable_spi2(void);
740
741#else /* !CONFIG_MFD_DB8500_PRCMU */
742
743static inline int prcmu_set_rc_a2p(enum romcode_write code)
744{
745 return 0;
746}
747
748static inline enum romcode_read prcmu_get_rc_p2a(void)
749{
750 return INIT;
751}
752
753static inline enum ap_pwrst prcmu_get_xp70_current_state(void)
754{
755 return AP_EXECUTE;
756}
757
758static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk,
759 bool keep_ap_pll)
760{
761 return 0;
762}
763
764static inline void prcmu_enable_wakeups(u32 wakeups) {}
765
766static inline void prcmu_disable_wakeups(void) {}
767
768static inline void prcmu_config_abb_event_readout(u32 abb_events) {}
769
770static inline int prcmu_set_arm_opp(u8 opp)
771{
772 return 0;
773}
774
775static inline int prcmu_get_arm_opp(void)
776{
777 return ARM_100_OPP;
778}
779
/*
 * Must be "static inline" like the surrounding stubs: a plain static
 * definition in a header emits an unused per-TU copy in every includer
 * (-Wunused-function) instead of an inlinable stub.
 */
static inline bool prcmu_has_arm_maxopp(void)
{
	return false;
}
784
/*
 * Must be "static inline" like the surrounding stubs: a plain static
 * definition in a header emits an unused per-TU copy in every includer
 * (-Wunused-function) instead of an inlinable stub.
 */
static inline bool prcmu_is_u8400(void)
{
	return false;
}
789
790static inline int prcmu_set_ape_opp(u8 opp)
791{
792 return 0;
793}
794
795static inline int prcmu_get_ape_opp(void)
796{
797 return APE_100_OPP;
798}
799
800static inline int prcmu_request_ape_opp_100_voltage(bool enable)
801{
802 return 0;
803}
804
805static inline int prcmu_release_usb_wakeup_state(void)
806{
807 return 0;
808}
809
810static inline int prcmu_set_ddr_opp(u8 opp)
811{
812 return 0;
813}
814
815static inline int prcmu_get_ddr_opp(void)
816{
817 return DDR_100_OPP;
818}
819
820static inline unsigned long prcmu_qos_get_cpufreq_opp_delay(void)
821{
822 return 0;
823}
824
825static inline void prcmu_qos_set_cpufreq_opp_delay(unsigned long n) {}
826
827static inline int prcmu_set_hwacc(u16 hw_acc_dev, u8 state)
828{
829 return 0;
830}
831
832static inline void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep,
833 struct prcmu_auto_pm_config *idle)
834{
835}
836
837static inline bool prcmu_is_auto_pm_enabled(void)
838{
839 return false;
840}
841
842static inline int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
843{
844 return 0;
845}
846
847static inline int prcmu_request_clock(u8 clock, bool enable)
848{
849 return 0;
850}
851
852static inline int prcmu_set_clock_divider(u8 clock, u8 divider)
853{
854 return 0;
855}
856
857int prcmu_config_esram0_deep_sleep(u8 state)
858{
859 return 0;
860}
861
862static inline int prcmu_config_hotdog(u8 threshold)
863{
864 return 0;
865}
866
867static inline int prcmu_config_hotmon(u8 low, u8 high)
868{
869 return 0;
870}
871
872static inline int prcmu_start_temp_sense(u16 cycles32k)
873{
874 return 0;
875}
876
877static inline int prcmu_stop_temp_sense(void)
878{
879 return 0;
880}
881
882static inline int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
883{
884 return -ENOSYS;
885}
886
887static inline int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
888{
889 return -ENOSYS;
890}
891
892static inline void prcmu_ac_wake_req(void) {}
893
894static inline void prcmu_ac_sleep_req(void) {}
895
896static inline void prcmu_system_reset(u16 reset_code) {}
897
898static inline void prcmu_modem_reset(void) {}
899
900static inline bool prcmu_is_ac_wake_requested(void)
901{
902 return false;
903}
904
#ifndef CONFIG_UX500_SOC_DB5500
static inline int prcmu_set_display_clocks(void)
{
	return 0;
}

static inline int prcmu_disable_dsipll(void)
{
	return 0;
}

static inline int prcmu_enable_dsipll(void)
{
	return 0;
}
#endif
921
/*
 * The enabled-driver branch declares these as returning void; the stubs
 * previously returned int, so the two configurations disagreed on the
 * function type. Match the declarations.
 */
static inline void prcmu_enable_spi2(void) {}

static inline void prcmu_disable_spi2(void) {}

931
932#endif /* !CONFIG_MFD_DB8500_PRCMU */
933
934#ifdef CONFIG_UX500_PRCMU_QOS_POWER
935int prcmu_qos_requirement(int pm_qos_class);
936int prcmu_qos_add_requirement(int pm_qos_class, char *name, s32 value);
937int prcmu_qos_update_requirement(int pm_qos_class, char *name, s32 new_value);
938void prcmu_qos_remove_requirement(int pm_qos_class, char *name);
939int prcmu_qos_add_notifier(int prcmu_qos_class,
940 struct notifier_block *notifier);
941int prcmu_qos_remove_notifier(int prcmu_qos_class,
942 struct notifier_block *notifier);
943#else
944static inline int prcmu_qos_requirement(int prcmu_qos_class)
945{
946 return 0;
947}
948
949static inline int prcmu_qos_add_requirement(int prcmu_qos_class,
950 char *name, s32 value)
951{
952 return 0;
953}
954
955static inline int prcmu_qos_update_requirement(int prcmu_qos_class,
956 char *name, s32 new_value)
957{
958 return 0;
959}
960
961static inline void prcmu_qos_remove_requirement(int prcmu_qos_class, char *name)
962{
963}
964
965static inline int prcmu_qos_add_notifier(int prcmu_qos_class,
966 struct notifier_block *notifier)
967{
968 return 0;
969}
970static inline int prcmu_qos_remove_notifier(int prcmu_qos_class,
971 struct notifier_block *notifier)
972{
973 return 0;
974}
975
976#endif
977
978#endif /* __MFD_DB8500_PRCMU_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6507dde38b16..8eb969ebf904 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -153,6 +153,7 @@ extern pgprot_t protection_map[16];
153#define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */ 153#define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */
154#define FAULT_FLAG_ALLOW_RETRY 0x08 /* Retry fault if blocking */ 154#define FAULT_FLAG_ALLOW_RETRY 0x08 /* Retry fault if blocking */
155#define FAULT_FLAG_RETRY_NOWAIT 0x10 /* Don't drop mmap_sem and wait when retrying */ 155#define FAULT_FLAG_RETRY_NOWAIT 0x10 /* Don't drop mmap_sem and wait when retrying */
156#define FAULT_FLAG_KILLABLE 0x20 /* The fault task is in SIGKILL killable region */
156 157
157/* 158/*
158 * This interface is used by x86 PAT code to identify a pfn mapping that is 159 * This interface is used by x86 PAT code to identify a pfn mapping that is
@@ -604,10 +605,6 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
604#define NODE_NOT_IN_PAGE_FLAGS 605#define NODE_NOT_IN_PAGE_FLAGS
605#endif 606#endif
606 607
607#ifndef PFN_SECTION_SHIFT
608#define PFN_SECTION_SHIFT 0
609#endif
610
611/* 608/*
612 * Define the bit shifts to access each section. For non-existent 609 * Define the bit shifts to access each section. For non-existent
613 * sections we define the shift as 0; that plus a 0 mask ensures 610 * sections we define the shift as 0; that plus a 0 mask ensures
@@ -681,6 +678,12 @@ static inline struct zone *page_zone(struct page *page)
681} 678}
682 679
683#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) 680#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
681static inline void set_page_section(struct page *page, unsigned long section)
682{
683 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
684 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
685}
686
684static inline unsigned long page_to_section(struct page *page) 687static inline unsigned long page_to_section(struct page *page)
685{ 688{
686 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; 689 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
@@ -699,18 +702,14 @@ static inline void set_page_node(struct page *page, unsigned long node)
699 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT; 702 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
700} 703}
701 704
702static inline void set_page_section(struct page *page, unsigned long section)
703{
704 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
705 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
706}
707
708static inline void set_page_links(struct page *page, enum zone_type zone, 705static inline void set_page_links(struct page *page, enum zone_type zone,
709 unsigned long node, unsigned long pfn) 706 unsigned long node, unsigned long pfn)
710{ 707{
711 set_page_zone(page, zone); 708 set_page_zone(page, zone);
712 set_page_node(page, node); 709 set_page_node(page, node);
710#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
713 set_page_section(page, pfn_to_section_nr(pfn)); 711 set_page_section(page, pfn_to_section_nr(pfn));
712#endif
714} 713}
715 714
716/* 715/*
@@ -862,26 +861,18 @@ extern void pagefault_out_of_memory(void);
862#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) 861#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
863 862
864/* 863/*
865 * Flags passed to show_mem() and __show_free_areas() to suppress output in 864 * Flags passed to show_mem() and show_free_areas() to suppress output in
866 * various contexts. 865 * various contexts.
867 */ 866 */
868#define SHOW_MEM_FILTER_NODES (0x0001u) /* filter disallowed nodes */ 867#define SHOW_MEM_FILTER_NODES (0x0001u) /* filter disallowed nodes */
869 868
870extern void show_free_areas(void); 869extern void show_free_areas(unsigned int flags);
871extern void __show_free_areas(unsigned int flags); 870extern bool skip_free_areas_node(unsigned int flags, int nid);
872 871
873int shmem_lock(struct file *file, int lock, struct user_struct *user); 872int shmem_lock(struct file *file, int lock, struct user_struct *user);
874struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); 873struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
875int shmem_zero_setup(struct vm_area_struct *); 874int shmem_zero_setup(struct vm_area_struct *);
876 875
877#ifndef CONFIG_MMU
878extern unsigned long shmem_get_unmapped_area(struct file *file,
879 unsigned long addr,
880 unsigned long len,
881 unsigned long pgoff,
882 unsigned long flags);
883#endif
884
885extern int can_do_mlock(void); 876extern int can_do_mlock(void);
886extern int user_shm_lock(size_t, struct user_struct *); 877extern int user_shm_lock(size_t, struct user_struct *);
887extern void user_shm_unlock(size_t, struct user_struct *); 878extern void user_shm_unlock(size_t, struct user_struct *);
@@ -894,8 +885,6 @@ struct zap_details {
894 struct address_space *check_mapping; /* Check page->mapping if set */ 885 struct address_space *check_mapping; /* Check page->mapping if set */
895 pgoff_t first_index; /* Lowest page->index to unmap */ 886 pgoff_t first_index; /* Lowest page->index to unmap */
896 pgoff_t last_index; /* Highest page->index to unmap */ 887 pgoff_t last_index; /* Highest page->index to unmap */
897 spinlock_t *i_mmap_lock; /* For unmap_mapping_range: */
898 unsigned long truncate_count; /* Compare vm_truncate_count */
899}; 888};
900 889
901struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, 890struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
@@ -905,7 +894,7 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
905 unsigned long size); 894 unsigned long size);
906unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, 895unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
907 unsigned long size, struct zap_details *); 896 unsigned long size, struct zap_details *);
908unsigned long unmap_vmas(struct mmu_gather **tlb, 897unsigned long unmap_vmas(struct mmu_gather *tlb,
909 struct vm_area_struct *start_vma, unsigned long start_addr, 898 struct vm_area_struct *start_vma, unsigned long start_addr,
910 unsigned long end_addr, unsigned long *nr_accounted, 899 unsigned long end_addr, unsigned long *nr_accounted,
911 struct zap_details *); 900 struct zap_details *);
@@ -1056,65 +1045,35 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1056/* 1045/*
1057 * per-process(per-mm_struct) statistics. 1046 * per-process(per-mm_struct) statistics.
1058 */ 1047 */
1059#if defined(SPLIT_RSS_COUNTING)
1060/*
1061 * The mm counters are not protected by its page_table_lock,
1062 * so must be incremented atomically.
1063 */
1064static inline void set_mm_counter(struct mm_struct *mm, int member, long value) 1048static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
1065{ 1049{
1066 atomic_long_set(&mm->rss_stat.count[member], value); 1050 atomic_long_set(&mm->rss_stat.count[member], value);
1067} 1051}
1068 1052
1053#if defined(SPLIT_RSS_COUNTING)
1069unsigned long get_mm_counter(struct mm_struct *mm, int member); 1054unsigned long get_mm_counter(struct mm_struct *mm, int member);
1070 1055#else
1071static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1072{
1073 atomic_long_add(value, &mm->rss_stat.count[member]);
1074}
1075
1076static inline void inc_mm_counter(struct mm_struct *mm, int member)
1077{
1078 atomic_long_inc(&mm->rss_stat.count[member]);
1079}
1080
1081static inline void dec_mm_counter(struct mm_struct *mm, int member)
1082{
1083 atomic_long_dec(&mm->rss_stat.count[member]);
1084}
1085
1086#else /* !USE_SPLIT_PTLOCKS */
1087/*
1088 * The mm counters are protected by its page_table_lock,
1089 * so can be incremented directly.
1090 */
1091static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
1092{
1093 mm->rss_stat.count[member] = value;
1094}
1095
1096static inline unsigned long get_mm_counter(struct mm_struct *mm, int member) 1056static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
1097{ 1057{
1098 return mm->rss_stat.count[member]; 1058 return atomic_long_read(&mm->rss_stat.count[member]);
1099} 1059}
1060#endif
1100 1061
1101static inline void add_mm_counter(struct mm_struct *mm, int member, long value) 1062static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1102{ 1063{
1103 mm->rss_stat.count[member] += value; 1064 atomic_long_add(value, &mm->rss_stat.count[member]);
1104} 1065}
1105 1066
1106static inline void inc_mm_counter(struct mm_struct *mm, int member) 1067static inline void inc_mm_counter(struct mm_struct *mm, int member)
1107{ 1068{
1108 mm->rss_stat.count[member]++; 1069 atomic_long_inc(&mm->rss_stat.count[member]);
1109} 1070}
1110 1071
1111static inline void dec_mm_counter(struct mm_struct *mm, int member) 1072static inline void dec_mm_counter(struct mm_struct *mm, int member)
1112{ 1073{
1113 mm->rss_stat.count[member]--; 1074 atomic_long_dec(&mm->rss_stat.count[member]);
1114} 1075}
1115 1076
1116#endif /* !USE_SPLIT_PTLOCKS */
1117
1118static inline unsigned long get_mm_rss(struct mm_struct *mm) 1077static inline unsigned long get_mm_rss(struct mm_struct *mm)
1119{ 1078{
1120 return get_mm_counter(mm, MM_FILEPAGES) + 1079 return get_mm_counter(mm, MM_FILEPAGES) +
@@ -1163,13 +1122,24 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
1163#endif 1122#endif
1164 1123
1165/* 1124/*
1125 * This struct is used to pass information from page reclaim to the shrinkers.
1126 * We consolidate the values for easier extention later.
1127 */
1128struct shrink_control {
1129 gfp_t gfp_mask;
1130
1131 /* How many slab objects shrinker() should scan and try to reclaim */
1132 unsigned long nr_to_scan;
1133};
1134
1135/*
1166 * A callback you can register to apply pressure to ageable caches. 1136 * A callback you can register to apply pressure to ageable caches.
1167 * 1137 *
1168 * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should 1138 * 'sc' is passed shrink_control which includes a count 'nr_to_scan'
1169 * look through the least-recently-used 'nr_to_scan' entries and 1139 * and a 'gfpmask'. It should look through the least-recently-used
1170 * attempt to free them up. It should return the number of objects 1140 * 'nr_to_scan' entries and attempt to free them up. It should return
1171 * which remain in the cache. If it returns -1, it means it cannot do 1141 * the number of objects which remain in the cache. If it returns -1, it means
1172 * any scanning at this time (eg. there is a risk of deadlock). 1142 * it cannot do any scanning at this time (eg. there is a risk of deadlock).
1173 * 1143 *
1174 * The 'gfpmask' refers to the allocation we are currently trying to 1144 * The 'gfpmask' refers to the allocation we are currently trying to
1175 * fulfil. 1145 * fulfil.
@@ -1178,7 +1148,7 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
1178 * querying the cache size, so a fastpath for that case is appropriate. 1148 * querying the cache size, so a fastpath for that case is appropriate.
1179 */ 1149 */
1180struct shrinker { 1150struct shrinker {
1181 int (*shrink)(struct shrinker *, int nr_to_scan, gfp_t gfp_mask); 1151 int (*shrink)(struct shrinker *, struct shrink_control *sc);
1182 int seeks; /* seeks to recreate an obj */ 1152 int seeks; /* seeks to recreate an obj */
1183 1153
1184 /* These are for internal use */ 1154 /* These are for internal use */
@@ -1380,7 +1350,7 @@ extern void set_dma_reserve(unsigned long new_dma_reserve);
1380extern void memmap_init_zone(unsigned long, int, unsigned long, 1350extern void memmap_init_zone(unsigned long, int, unsigned long,
1381 unsigned long, enum memmap_context); 1351 unsigned long, enum memmap_context);
1382extern void setup_per_zone_wmarks(void); 1352extern void setup_per_zone_wmarks(void);
1383extern void calculate_zone_inactive_ratio(struct zone *zone); 1353extern int __meminit init_per_zone_wmark_min(void);
1384extern void mem_init(void); 1354extern void mem_init(void);
1385extern void __init mmap_init(void); 1355extern void __init mmap_init(void);
1386extern void show_mem(unsigned int flags); 1356extern void show_mem(unsigned int flags);
@@ -1388,6 +1358,8 @@ extern void si_meminfo(struct sysinfo * val);
1388extern void si_meminfo_node(struct sysinfo *val, int nid); 1358extern void si_meminfo_node(struct sysinfo *val, int nid);
1389extern int after_bootmem; 1359extern int after_bootmem;
1390 1360
1361extern void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
1362
1391extern void setup_per_cpu_pageset(void); 1363extern void setup_per_cpu_pageset(void);
1392 1364
1393extern void zone_pcp_update(struct zone *zone); 1365extern void zone_pcp_update(struct zone *zone);
@@ -1517,15 +1489,17 @@ unsigned long ra_submit(struct file_ra_state *ra,
1517 struct address_space *mapping, 1489 struct address_space *mapping,
1518 struct file *filp); 1490 struct file *filp);
1519 1491
1520/* Do stack extension */ 1492/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
1521extern int expand_stack(struct vm_area_struct *vma, unsigned long address); 1493extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
1494
1495/* CONFIG_STACK_GROWSUP still needs to to grow downwards at some places */
1496extern int expand_downwards(struct vm_area_struct *vma,
1497 unsigned long address);
1522#if VM_GROWSUP 1498#if VM_GROWSUP
1523extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); 1499extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
1524#else 1500#else
1525 #define expand_upwards(vma, address) do { } while (0) 1501 #define expand_upwards(vma, address) do { } while (0)
1526#endif 1502#endif
1527extern int expand_stack_downwards(struct vm_area_struct *vma,
1528 unsigned long address);
1529 1503
1530/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ 1504/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
1531extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); 1505extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
@@ -1627,8 +1601,9 @@ int in_gate_area_no_mm(unsigned long addr);
1627 1601
1628int drop_caches_sysctl_handler(struct ctl_table *, int, 1602int drop_caches_sysctl_handler(struct ctl_table *, int,
1629 void __user *, size_t *, loff_t *); 1603 void __user *, size_t *, loff_t *);
1630unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, 1604unsigned long shrink_slab(struct shrink_control *shrink,
1631 unsigned long lru_pages); 1605 unsigned long nr_pages_scanned,
1606 unsigned long lru_pages);
1632 1607
1633#ifndef CONFIG_MMU 1608#ifndef CONFIG_MMU
1634#define randomize_va_space 0 1609#define randomize_va_space 0
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 02aa5619709b..071d459e866b 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -175,7 +175,6 @@ struct vm_area_struct {
175 units, *not* PAGE_CACHE_SIZE */ 175 units, *not* PAGE_CACHE_SIZE */
176 struct file * vm_file; /* File we map to (can be NULL). */ 176 struct file * vm_file; /* File we map to (can be NULL). */
177 void * vm_private_data; /* was vm_pte (shared mem) */ 177 void * vm_private_data; /* was vm_pte (shared mem) */
178 unsigned long vm_truncate_count;/* truncate_count or restart_addr */
179 178
180#ifndef CONFIG_MMU 179#ifndef CONFIG_MMU
181 struct vm_region *vm_region; /* NOMMU mapping region */ 180 struct vm_region *vm_region; /* NOMMU mapping region */
@@ -205,19 +204,16 @@ enum {
205 204
206#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU) 205#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU)
207#define SPLIT_RSS_COUNTING 206#define SPLIT_RSS_COUNTING
208struct mm_rss_stat {
209 atomic_long_t count[NR_MM_COUNTERS];
210};
211/* per-thread cached information, */ 207/* per-thread cached information, */
212struct task_rss_stat { 208struct task_rss_stat {
213 int events; /* for synchronization threshold */ 209 int events; /* for synchronization threshold */
214 int count[NR_MM_COUNTERS]; 210 int count[NR_MM_COUNTERS];
215}; 211};
216#else /* !USE_SPLIT_PTLOCKS */ 212#endif /* USE_SPLIT_PTLOCKS */
213
217struct mm_rss_stat { 214struct mm_rss_stat {
218 unsigned long count[NR_MM_COUNTERS]; 215 atomic_long_t count[NR_MM_COUNTERS];
219}; 216};
220#endif /* !USE_SPLIT_PTLOCKS */
221 217
222struct mm_struct { 218struct mm_struct {
223 struct vm_area_struct * mmap; /* list of VMAs */ 219 struct vm_area_struct * mmap; /* list of VMAs */
@@ -266,8 +262,6 @@ struct mm_struct {
266 262
267 struct linux_binfmt *binfmt; 263 struct linux_binfmt *binfmt;
268 264
269 cpumask_t cpu_vm_mask;
270
271 /* Architecture-specific MM context */ 265 /* Architecture-specific MM context */
272 mm_context_t context; 266 mm_context_t context;
273 267
@@ -317,9 +311,14 @@ struct mm_struct {
317#ifdef CONFIG_TRANSPARENT_HUGEPAGE 311#ifdef CONFIG_TRANSPARENT_HUGEPAGE
318 pgtable_t pmd_huge_pte; /* protected by page_table_lock */ 312 pgtable_t pmd_huge_pte; /* protected by page_table_lock */
319#endif 313#endif
314
315 cpumask_var_t cpu_vm_mask_var;
320}; 316};
321 317
322/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */ 318/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
323#define mm_cpumask(mm) (&(mm)->cpu_vm_mask) 319static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
320{
321 return mm->cpu_vm_mask_var;
322}
324 323
325#endif /* _LINUX_MM_TYPES_H */ 324#endif /* _LINUX_MM_TYPES_H */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index cc2e7dfea9d7..1d1b1e13f79f 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -150,7 +150,7 @@ struct mmu_notifier_ops {
150 * Therefore notifier chains can only be traversed when either 150 * Therefore notifier chains can only be traversed when either
151 * 151 *
152 * 1. mmap_sem is held. 152 * 1. mmap_sem is held.
153 * 2. One of the reverse map locks is held (i_mmap_lock or anon_vma->lock). 153 * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->mutex).
154 * 3. No other concurrent thread can access the list (release) 154 * 3. No other concurrent thread can access the list (release)
155 */ 155 */
156struct mmu_notifier { 156struct mmu_notifier {
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e56f835274c9..217bcf6bca77 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -928,9 +928,6 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
928#define pfn_to_nid(pfn) (0) 928#define pfn_to_nid(pfn) (0)
929#endif 929#endif
930 930
931#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
932#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
933
934#ifdef CONFIG_SPARSEMEM 931#ifdef CONFIG_SPARSEMEM
935 932
936/* 933/*
@@ -956,6 +953,12 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
956#error Allocator MAX_ORDER exceeds SECTION_SIZE 953#error Allocator MAX_ORDER exceeds SECTION_SIZE
957#endif 954#endif
958 955
956#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
957#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
958
959#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
960#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
961
959struct page; 962struct page;
960struct page_cgroup; 963struct page_cgroup;
961struct mem_section { 964struct mem_section {
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index c75471db576e..a940fe435aca 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -132,6 +132,7 @@ static inline int mutex_is_locked(struct mutex *lock)
132 */ 132 */
133#ifdef CONFIG_DEBUG_LOCK_ALLOC 133#ifdef CONFIG_DEBUG_LOCK_ALLOC
134extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); 134extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
135extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
135extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock, 136extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
136 unsigned int subclass); 137 unsigned int subclass);
137extern int __must_check mutex_lock_killable_nested(struct mutex *lock, 138extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
@@ -140,6 +141,13 @@ extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
140#define mutex_lock(lock) mutex_lock_nested(lock, 0) 141#define mutex_lock(lock) mutex_lock_nested(lock, 0)
141#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0) 142#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
142#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0) 143#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
144
145#define mutex_lock_nest_lock(lock, nest_lock) \
146do { \
147 typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
148 _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
149} while (0)
150
143#else 151#else
144extern void mutex_lock(struct mutex *lock); 152extern void mutex_lock(struct mutex *lock);
145extern int __must_check mutex_lock_interruptible(struct mutex *lock); 153extern int __must_check mutex_lock_interruptible(struct mutex *lock);
@@ -148,6 +156,7 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
148# define mutex_lock_nested(lock, subclass) mutex_lock(lock) 156# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
149# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock) 157# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
150# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock) 158# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
159# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
151#endif 160#endif
152 161
153/* 162/*
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 5e3aa8311c5e..4952fb874ad3 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -40,6 +40,8 @@ enum oom_constraint {
40 CONSTRAINT_MEMCG, 40 CONSTRAINT_MEMCG,
41}; 41};
42 42
43extern int test_set_oom_score_adj(int new_val);
44
43extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, 45extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
44 const nodemask_t *nodemask, unsigned long totalpages); 46 const nodemask_t *nodemask, unsigned long totalpages);
45extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags); 47extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index c11950652646..716875e53520 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -219,6 +219,12 @@ static inline struct page *page_cache_alloc_cold(struct address_space *x)
219 return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD); 219 return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
220} 220}
221 221
222static inline struct page *page_cache_alloc_readahead(struct address_space *x)
223{
224 return __page_cache_alloc(mapping_gfp_mask(x) |
225 __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
226}
227
222typedef int filler_t(void *, struct page *); 228typedef int filler_t(void *, struct page *);
223 229
224extern struct page * find_get_page(struct address_space *mapping, 230extern struct page * find_get_page(struct address_space *mapping,
@@ -357,6 +363,15 @@ static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
357 */ 363 */
358extern void wait_on_page_bit(struct page *page, int bit_nr); 364extern void wait_on_page_bit(struct page *page, int bit_nr);
359 365
366extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
367
368static inline int wait_on_page_locked_killable(struct page *page)
369{
370 if (PageLocked(page))
371 return wait_on_page_bit_killable(page, PG_locked);
372 return 0;
373}
374
360/* 375/*
361 * Wait for a page to be unlocked. 376 * Wait for a page to be unlocked.
362 * 377 *
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 46f6ba56fa91..5edc9014263a 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -75,7 +75,7 @@ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
75 barrier(); /* Prevent reloads of fbc->count */ 75 barrier(); /* Prevent reloads of fbc->count */
76 if (ret >= 0) 76 if (ret >= 0)
77 return ret; 77 return ret;
78 return 1; 78 return 0;
79} 79}
80 80
81static inline int percpu_counter_initialized(struct percpu_counter *fbc) 81static inline int percpu_counter_initialized(struct percpu_counter *fbc)
@@ -133,6 +133,10 @@ static inline s64 percpu_counter_read(struct percpu_counter *fbc)
133 return fbc->count; 133 return fbc->count;
134} 134}
135 135
136/*
137 * percpu_counter is intended to track positive numbers. In the UP case the
138 * number should never be negative.
139 */
136static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) 140static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
137{ 141{
138 return fbc->count; 142 return fbc->count;
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 808227d40a64..959c14132f46 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -82,6 +82,7 @@ struct k_itimer {
82 unsigned long expires; 82 unsigned long expires;
83 } mmtimer; 83 } mmtimer;
84 struct alarm alarmtimer; 84 struct alarm alarmtimer;
85 struct rcu_head rcu;
85 } it; 86 } it;
86}; 87};
87 88
diff --git a/include/linux/printk.h b/include/linux/printk.h
index ee048e77e1ae..0101d55d9651 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -1,6 +1,8 @@
1#ifndef __KERNEL_PRINTK__ 1#ifndef __KERNEL_PRINTK__
2#define __KERNEL_PRINTK__ 2#define __KERNEL_PRINTK__
3 3
4#include <linux/init.h>
5
4extern const char linux_banner[]; 6extern const char linux_banner[];
5extern const char linux_proc_banner[]; 7extern const char linux_proc_banner[];
6 8
@@ -113,6 +115,7 @@ extern int dmesg_restrict;
113extern int kptr_restrict; 115extern int kptr_restrict;
114 116
115void log_buf_kexec_setup(void); 117void log_buf_kexec_setup(void);
118void __init setup_log_buf(int early);
116#else 119#else
117static inline __attribute__ ((format (printf, 1, 0))) 120static inline __attribute__ ((format (printf, 1, 0)))
118int vprintk(const char *s, va_list args) 121int vprintk(const char *s, va_list args)
@@ -137,6 +140,10 @@ static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies,
137static inline void log_buf_kexec_setup(void) 140static inline void log_buf_kexec_setup(void)
138{ 141{
139} 142}
143
144static inline void setup_log_buf(int early)
145{
146}
140#endif 147#endif
141 148
142extern void dump_stack(void) __cold; 149extern void dump_stack(void) __cold;
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index eaf4350c0f90..3686cd6c9aca 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -288,12 +288,4 @@ static inline struct net *PDE_NET(struct proc_dir_entry *pde)
288 return pde->parent->data; 288 return pde->parent->data;
289} 289}
290 290
291struct proc_maps_private {
292 struct pid *pid;
293 struct task_struct *task;
294#ifdef CONFIG_MMU
295 struct vm_area_struct *tail_vma;
296#endif
297};
298
299#endif /* _LINUX_PROC_FS_H */ 291#endif /* _LINUX_PROC_FS_H */
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
index 943a85ab0020..e07e2742a865 100644
--- a/include/linux/ptp_classify.h
+++ b/include/linux/ptp_classify.h
@@ -25,6 +25,7 @@
25 25
26#include <linux/if_ether.h> 26#include <linux/if_ether.h>
27#include <linux/if_vlan.h> 27#include <linux/if_vlan.h>
28#include <linux/ip.h>
28#include <linux/filter.h> 29#include <linux/filter.h>
29#ifdef __KERNEL__ 30#ifdef __KERNEL__
30#include <linux/in.h> 31#include <linux/in.h>
@@ -58,6 +59,12 @@
58#define OFF_NEXT 6 59#define OFF_NEXT 6
59#define OFF_UDP_DST 2 60#define OFF_UDP_DST 2
60 61
62#define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */
63#define OFF_PTP_SEQUENCE_ID 30
64#define OFF_PTP_CONTROL 32 /* PTPv1 only */
65
66#define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2)
67
61#define IP6_HLEN 40 68#define IP6_HLEN 40
62#define UDP_HLEN 8 69#define UDP_HLEN 8
63 70
diff --git a/include/linux/ptp_clock.h b/include/linux/ptp_clock.h
new file mode 100644
index 000000000000..94e981f810a2
--- /dev/null
+++ b/include/linux/ptp_clock.h
@@ -0,0 +1,84 @@
1/*
2 * PTP 1588 clock support - user space interface
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#ifndef _PTP_CLOCK_H_
22#define _PTP_CLOCK_H_
23
24#include <linux/ioctl.h>
25#include <linux/types.h>
26
27/* PTP_xxx bits, for the flags field within the request structures. */
28#define PTP_ENABLE_FEATURE (1<<0)
29#define PTP_RISING_EDGE (1<<1)
30#define PTP_FALLING_EDGE (1<<2)
31
32/*
33 * struct ptp_clock_time - represents a time value
34 *
35 * The sign of the seconds field applies to the whole value. The
36 * nanoseconds field is always unsigned. The reserved field is
37 * included for sub-nanosecond resolution, should the demand for
38 * this ever appear.
39 *
40 */
41struct ptp_clock_time {
42 __s64 sec; /* seconds */
43 __u32 nsec; /* nanoseconds */
44 __u32 reserved;
45};
46
47struct ptp_clock_caps {
48 int max_adj; /* Maximum frequency adjustment in parts per billon. */
49 int n_alarm; /* Number of programmable alarms. */
50 int n_ext_ts; /* Number of external time stamp channels. */
51 int n_per_out; /* Number of programmable periodic signals. */
52 int pps; /* Whether the clock supports a PPS callback. */
53 int rsv[15]; /* Reserved for future use. */
54};
55
56struct ptp_extts_request {
57 unsigned int index; /* Which channel to configure. */
58 unsigned int flags; /* Bit field for PTP_xxx flags. */
59 unsigned int rsv[2]; /* Reserved for future use. */
60};
61
62struct ptp_perout_request {
63 struct ptp_clock_time start; /* Absolute start time. */
64 struct ptp_clock_time period; /* Desired period, zero means disable. */
65 unsigned int index; /* Which channel to configure. */
66 unsigned int flags; /* Reserved for future use. */
67 unsigned int rsv[4]; /* Reserved for future use. */
68};
69
70#define PTP_CLK_MAGIC '='
71
72#define PTP_CLOCK_GETCAPS _IOR(PTP_CLK_MAGIC, 1, struct ptp_clock_caps)
73#define PTP_EXTTS_REQUEST _IOW(PTP_CLK_MAGIC, 2, struct ptp_extts_request)
74#define PTP_PEROUT_REQUEST _IOW(PTP_CLK_MAGIC, 3, struct ptp_perout_request)
75#define PTP_ENABLE_PPS _IOW(PTP_CLK_MAGIC, 4, int)
76
77struct ptp_extts_event {
78 struct ptp_clock_time t; /* Time event occured. */
79 unsigned int index; /* Which channel produced the event. */
80 unsigned int flags; /* Reserved for future use. */
81 unsigned int rsv[2]; /* Reserved for future use. */
82};
83
84#endif
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
new file mode 100644
index 000000000000..dd2e44fba63e
--- /dev/null
+++ b/include/linux/ptp_clock_kernel.h
@@ -0,0 +1,139 @@
1/*
2 * PTP 1588 clock support
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#ifndef _PTP_CLOCK_KERNEL_H_
22#define _PTP_CLOCK_KERNEL_H_
23
24#include <linux/ptp_clock.h>
25
26
27struct ptp_clock_request {
28 enum {
29 PTP_CLK_REQ_EXTTS,
30 PTP_CLK_REQ_PEROUT,
31 PTP_CLK_REQ_PPS,
32 } type;
33 union {
34 struct ptp_extts_request extts;
35 struct ptp_perout_request perout;
36 };
37};
38
39/**
40 * struct ptp_clock_info - decribes a PTP hardware clock
41 *
42 * @owner: The clock driver should set to THIS_MODULE.
43 * @name: A short name to identify the clock.
44 * @max_adj: The maximum possible frequency adjustment, in parts per billon.
45 * @n_alarm: The number of programmable alarms.
46 * @n_ext_ts: The number of external time stamp channels.
47 * @n_per_out: The number of programmable periodic signals.
48 * @pps: Indicates whether the clock supports a PPS callback.
49 *
50 * clock operations
51 *
52 * @adjfreq: Adjusts the frequency of the hardware clock.
53 * parameter delta: Desired period change in parts per billion.
54 *
55 * @adjtime: Shifts the time of the hardware clock.
56 * parameter delta: Desired change in nanoseconds.
57 *
58 * @gettime: Reads the current time from the hardware clock.
59 * parameter ts: Holds the result.
60 *
61 * @settime: Set the current time on the hardware clock.
62 * parameter ts: Time value to set.
63 *
64 * @enable: Request driver to enable or disable an ancillary feature.
65 * parameter request: Desired resource to enable or disable.
66 * parameter on: Caller passes one to enable or zero to disable.
67 *
68 * Drivers should embed their ptp_clock_info within a private
69 * structure, obtaining a reference to it using container_of().
70 *
71 * The callbacks must all return zero on success, non-zero otherwise.
72 */
73
74struct ptp_clock_info {
75 struct module *owner;
76 char name[16];
77 s32 max_adj;
78 int n_alarm;
79 int n_ext_ts;
80 int n_per_out;
81 int pps;
82 int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
83 int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
84 int (*gettime)(struct ptp_clock_info *ptp, struct timespec *ts);
85 int (*settime)(struct ptp_clock_info *ptp, const struct timespec *ts);
86 int (*enable)(struct ptp_clock_info *ptp,
87 struct ptp_clock_request *request, int on);
88};
89
90struct ptp_clock;
91
92/**
93 * ptp_clock_register() - register a PTP hardware clock driver
94 *
95 * @info: Structure describing the new clock.
96 */
97
98extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info);
99
100/**
101 * ptp_clock_unregister() - unregister a PTP hardware clock driver
102 *
103 * @ptp: The clock to remove from service.
104 */
105
106extern int ptp_clock_unregister(struct ptp_clock *ptp);
107
108
109enum ptp_clock_events {
110 PTP_CLOCK_ALARM,
111 PTP_CLOCK_EXTTS,
112 PTP_CLOCK_PPS,
113};
114
115/**
116 * struct ptp_clock_event - decribes a PTP hardware clock event
117 *
118 * @type: One of the ptp_clock_events enumeration values.
119 * @index: Identifies the source of the event.
120 * @timestamp: When the event occured.
121 */
122
123struct ptp_clock_event {
124 int type;
125 int index;
126 u64 timestamp;
127};
128
129/**
130 * ptp_clock_event() - notify the PTP layer about an event
131 *
132 * @ptp: The clock obtained from ptp_clock_register().
133 * @event: Message structure describing the event.
134 */
135
136extern void ptp_clock_event(struct ptp_clock *ptp,
137 struct ptp_clock_event *event);
138
139#endif
diff --git a/include/linux/regulator/db8500-prcmu.h b/include/linux/regulator/db8500-prcmu.h
new file mode 100644
index 000000000000..612062313b68
--- /dev/null
+++ b/include/linux/regulator/db8500-prcmu.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License Terms: GNU General Public License v2
5 *
6 * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
7 *
8 * Interface to power domain regulators on DB8500
9 */
10
11#ifndef __REGULATOR_H__
12#define __REGULATOR_H__
13
14/* Number of DB8500 regulators and regulator enumeration */
15enum db8500_regulator_id {
16 DB8500_REGULATOR_VAPE,
17 DB8500_REGULATOR_VARM,
18 DB8500_REGULATOR_VMODEM,
19 DB8500_REGULATOR_VPLL,
20 DB8500_REGULATOR_VSMPS1,
21 DB8500_REGULATOR_VSMPS2,
22 DB8500_REGULATOR_VSMPS3,
23 DB8500_REGULATOR_VRF1,
24 DB8500_REGULATOR_SWITCH_SVAMMDSP,
25 DB8500_REGULATOR_SWITCH_SVAMMDSPRET,
26 DB8500_REGULATOR_SWITCH_SVAPIPE,
27 DB8500_REGULATOR_SWITCH_SIAMMDSP,
28 DB8500_REGULATOR_SWITCH_SIAMMDSPRET,
29 DB8500_REGULATOR_SWITCH_SIAPIPE,
30 DB8500_REGULATOR_SWITCH_SGA,
31 DB8500_REGULATOR_SWITCH_B2R2_MCDE,
32 DB8500_REGULATOR_SWITCH_ESRAM12,
33 DB8500_REGULATOR_SWITCH_ESRAM12RET,
34 DB8500_REGULATOR_SWITCH_ESRAM34,
35 DB8500_REGULATOR_SWITCH_ESRAM34RET,
36 DB8500_NUM_REGULATORS
37};
38
39/*
40 * Exported interface for CPUIdle only. This function is called with all
41 * interrupts turned off.
42 */
43int power_state_active_is_enabled(void);
44
45#endif
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 830e65dc01ee..2148b122779b 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -7,7 +7,7 @@
7#include <linux/list.h> 7#include <linux/list.h>
8#include <linux/slab.h> 8#include <linux/slab.h>
9#include <linux/mm.h> 9#include <linux/mm.h>
10#include <linux/spinlock.h> 10#include <linux/mutex.h>
11#include <linux/memcontrol.h> 11#include <linux/memcontrol.h>
12 12
13/* 13/*
@@ -26,7 +26,7 @@
26 */ 26 */
27struct anon_vma { 27struct anon_vma {
28 struct anon_vma *root; /* Root of this anon_vma tree */ 28 struct anon_vma *root; /* Root of this anon_vma tree */
29 spinlock_t lock; /* Serialize access to vma list */ 29 struct mutex mutex; /* Serialize access to vma list */
30 /* 30 /*
31 * The refcount is taken on an anon_vma when there is no 31 * The refcount is taken on an anon_vma when there is no
32 * guarantee that the vma of page tables will exist for 32 * guarantee that the vma of page tables will exist for
@@ -64,7 +64,7 @@ struct anon_vma_chain {
64 struct vm_area_struct *vma; 64 struct vm_area_struct *vma;
65 struct anon_vma *anon_vma; 65 struct anon_vma *anon_vma;
66 struct list_head same_vma; /* locked by mmap_sem & page_table_lock */ 66 struct list_head same_vma; /* locked by mmap_sem & page_table_lock */
67 struct list_head same_anon_vma; /* locked by anon_vma->lock */ 67 struct list_head same_anon_vma; /* locked by anon_vma->mutex */
68}; 68};
69 69
70#ifdef CONFIG_MMU 70#ifdef CONFIG_MMU
@@ -93,24 +93,24 @@ static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
93{ 93{
94 struct anon_vma *anon_vma = vma->anon_vma; 94 struct anon_vma *anon_vma = vma->anon_vma;
95 if (anon_vma) 95 if (anon_vma)
96 spin_lock(&anon_vma->root->lock); 96 mutex_lock(&anon_vma->root->mutex);
97} 97}
98 98
99static inline void vma_unlock_anon_vma(struct vm_area_struct *vma) 99static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
100{ 100{
101 struct anon_vma *anon_vma = vma->anon_vma; 101 struct anon_vma *anon_vma = vma->anon_vma;
102 if (anon_vma) 102 if (anon_vma)
103 spin_unlock(&anon_vma->root->lock); 103 mutex_unlock(&anon_vma->root->mutex);
104} 104}
105 105
106static inline void anon_vma_lock(struct anon_vma *anon_vma) 106static inline void anon_vma_lock(struct anon_vma *anon_vma)
107{ 107{
108 spin_lock(&anon_vma->root->lock); 108 mutex_lock(&anon_vma->root->mutex);
109} 109}
110 110
111static inline void anon_vma_unlock(struct anon_vma *anon_vma) 111static inline void anon_vma_unlock(struct anon_vma *anon_vma)
112{ 112{
113 spin_unlock(&anon_vma->root->lock); 113 mutex_unlock(&anon_vma->root->mutex);
114} 114}
115 115
116/* 116/*
@@ -218,20 +218,7 @@ int try_to_munlock(struct page *);
218/* 218/*
219 * Called by memory-failure.c to kill processes. 219 * Called by memory-failure.c to kill processes.
220 */ 220 */
221struct anon_vma *__page_lock_anon_vma(struct page *page); 221struct anon_vma *page_lock_anon_vma(struct page *page);
222
223static inline struct anon_vma *page_lock_anon_vma(struct page *page)
224{
225 struct anon_vma *anon_vma;
226
227 __cond_lock(RCU, anon_vma = __page_lock_anon_vma(page));
228
229 /* (void) is needed to make gcc happy */
230 (void) __cond_lock(&anon_vma->root->lock, anon_vma);
231
232 return anon_vma;
233}
234
235void page_unlock_anon_vma(struct anon_vma *anon_vma); 222void page_unlock_anon_vma(struct anon_vma *anon_vma);
236int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); 223int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
237 224
diff --git a/include/linux/sched.h b/include/linux/sched.h
index aaf71e08222c..f18300eddfcb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1753,7 +1753,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
1753#define PF_FROZEN 0x00010000 /* frozen for system suspend */ 1753#define PF_FROZEN 0x00010000 /* frozen for system suspend */
1754#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ 1754#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
1755#define PF_KSWAPD 0x00040000 /* I am kswapd */ 1755#define PF_KSWAPD 0x00040000 /* I am kswapd */
1756#define PF_OOM_ORIGIN 0x00080000 /* Allocating much memory to others */
1757#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ 1756#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
1758#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ 1757#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
1759#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ 1758#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
@@ -2177,6 +2176,7 @@ static inline void mmdrop(struct mm_struct * mm)
2177 if (unlikely(atomic_dec_and_test(&mm->mm_count))) 2176 if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2178 __mmdrop(mm); 2177 __mmdrop(mm);
2179} 2178}
2179extern int mm_init_cpumask(struct mm_struct *mm, struct mm_struct *oldmm);
2180 2180
2181/* mmput gets rid of the mappings and all user-space */ 2181/* mmput gets rid of the mappings and all user-space */
2182extern void mmput(struct mm_struct *); 2182extern void mmput(struct mm_struct *);
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 399be5ad2f99..2b7fec840517 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -9,6 +9,8 @@
9 9
10#define SHMEM_NR_DIRECT 16 10#define SHMEM_NR_DIRECT 16
11 11
12#define SHMEM_SYMLINK_INLINE_LEN (SHMEM_NR_DIRECT * sizeof(swp_entry_t))
13
12struct shmem_inode_info { 14struct shmem_inode_info {
13 spinlock_t lock; 15 spinlock_t lock;
14 unsigned long flags; 16 unsigned long flags;
@@ -17,8 +19,12 @@ struct shmem_inode_info {
17 unsigned long next_index; /* highest alloced index + 1 */ 19 unsigned long next_index; /* highest alloced index + 1 */
18 struct shared_policy policy; /* NUMA memory alloc policy */ 20 struct shared_policy policy; /* NUMA memory alloc policy */
19 struct page *i_indirect; /* top indirect blocks page */ 21 struct page *i_indirect; /* top indirect blocks page */
20 swp_entry_t i_direct[SHMEM_NR_DIRECT]; /* first blocks */ 22 union {
23 swp_entry_t i_direct[SHMEM_NR_DIRECT]; /* first blocks */
24 char inline_symlink[SHMEM_SYMLINK_INLINE_LEN];
25 };
21 struct list_head swaplist; /* chain of maybes on swap */ 26 struct list_head swaplist; /* chain of maybes on swap */
27 struct list_head xattr_list; /* list of shmem_xattr */
22 struct inode vfs_inode; 28 struct inode vfs_inode;
23}; 29};
24 30
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 2b3831b58aa4..51359837511a 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -261,6 +261,7 @@ extern void dec_zone_state(struct zone *, enum zone_stat_item);
261extern void __dec_zone_state(struct zone *, enum zone_stat_item); 261extern void __dec_zone_state(struct zone *, enum zone_stat_item);
262 262
263void refresh_cpu_vm_stats(int); 263void refresh_cpu_vm_stats(int);
264void refresh_zone_stat_thresholds(void);
264 265
265int calculate_pressure_threshold(struct zone *zone); 266int calculate_pressure_threshold(struct zone *zone);
266int calculate_normal_threshold(struct zone *zone); 267int calculate_normal_threshold(struct zone *zone);
@@ -313,6 +314,10 @@ static inline void __dec_zone_page_state(struct page *page,
313#define set_pgdat_percpu_threshold(pgdat, callback) { } 314#define set_pgdat_percpu_threshold(pgdat, callback) { }
314 315
315static inline void refresh_cpu_vm_stats(int cpu) { } 316static inline void refresh_cpu_vm_stats(int cpu) { }
316#endif 317static inline void refresh_zone_stat_thresholds(void) { }
318
319#endif /* CONFIG_SMP */
320
321extern const char * const vmstat_text[];
317 322
318#endif /* _LINUX_VMSTAT_H */ 323#endif /* _LINUX_VMSTAT_H */
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 6050783005bd..aed54c50aa66 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -13,10 +13,6 @@
13#define XATTR_CREATE 0x1 /* set value, fail if attr already exists */ 13#define XATTR_CREATE 0x1 /* set value, fail if attr already exists */
14#define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */ 14#define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */
15 15
16#ifdef __KERNEL__
17
18#include <linux/types.h>
19
20/* Namespaces */ 16/* Namespaces */
21#define XATTR_OS2_PREFIX "os2." 17#define XATTR_OS2_PREFIX "os2."
22#define XATTR_OS2_PREFIX_LEN (sizeof (XATTR_OS2_PREFIX) - 1) 18#define XATTR_OS2_PREFIX_LEN (sizeof (XATTR_OS2_PREFIX) - 1)
@@ -53,6 +49,10 @@
53#define XATTR_CAPS_SUFFIX "capability" 49#define XATTR_CAPS_SUFFIX "capability"
54#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX 50#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX
55 51
52#ifdef __KERNEL__
53
54#include <linux/types.h>
55
56struct inode; 56struct inode;
57struct dentry; 57struct dentry;
58 58
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index d2df55b0c213..008711e8e78f 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -241,10 +241,10 @@ enum p9_open_mode_t {
241 241
242/** 242/**
243 * enum p9_perm_t - 9P permissions 243 * enum p9_perm_t - 9P permissions
244 * @P9_DMDIR: mode bite for directories 244 * @P9_DMDIR: mode bit for directories
245 * @P9_DMAPPEND: mode bit for is append-only 245 * @P9_DMAPPEND: mode bit for is append-only
246 * @P9_DMEXCL: mode bit for excluse use (only one open handle allowed) 246 * @P9_DMEXCL: mode bit for excluse use (only one open handle allowed)
247 * @P9_DMMOUNT: mode bite for mount points 247 * @P9_DMMOUNT: mode bit for mount points
248 * @P9_DMAUTH: mode bit for authentication file 248 * @P9_DMAUTH: mode bit for authentication file
249 * @P9_DMTMP: mode bit for non-backed-up files 249 * @P9_DMTMP: mode bit for non-backed-up files
250 * @P9_DMSYMLINK: mode bit for symbolic links (9P2000.u) 250 * @P9_DMSYMLINK: mode bit for symbolic links (9P2000.u)
@@ -362,7 +362,7 @@ struct p9_qid {
362}; 362};
363 363
364/** 364/**
365 * struct p9_stat - file system metadata information 365 * struct p9_wstat - file system metadata information
366 * @size: length prefix for this stat structure instance 366 * @size: length prefix for this stat structure instance
367 * @type: the type of the server (equivalent to a major number) 367 * @type: the type of the server (equivalent to a major number)
368 * @dev: the sub-type of the server (equivalent to a minor number) 368 * @dev: the sub-type of the server (equivalent to a minor number)
@@ -687,10 +687,10 @@ struct p9_rwstat {
687 * @size: prefixed length of the structure 687 * @size: prefixed length of the structure
688 * @id: protocol operating identifier of type &p9_msg_t 688 * @id: protocol operating identifier of type &p9_msg_t
689 * @tag: transaction id of the request 689 * @tag: transaction id of the request
690 * @offset: used by marshalling routines to track currentposition in buffer 690 * @offset: used by marshalling routines to track current position in buffer
691 * @capacity: used by marshalling routines to track total malloc'd capacity 691 * @capacity: used by marshalling routines to track total malloc'd capacity
692 * @pubuf: Payload user buffer given by the caller 692 * @pubuf: Payload user buffer given by the caller
693 * @pubuf: Payload kernel buffer given by the caller 693 * @pkbuf: Payload kernel buffer given by the caller
694 * @pbuf_size: pubuf/pkbuf(only one will be !NULL) size to be read/write. 694 * @pbuf_size: pubuf/pkbuf(only one will be !NULL) size to be read/write.
695 * @private: For transport layer's use. 695 * @private: For transport layer's use.
696 * @sdata: payload 696 * @sdata: payload
@@ -714,7 +714,7 @@ struct p9_fcall {
714 size_t pbuf_size; 714 size_t pbuf_size;
715 void *private; 715 void *private;
716 716
717 uint8_t *sdata; 717 u8 *sdata;
718}; 718};
719 719
720struct p9_idpool; 720struct p9_idpool;
@@ -728,7 +728,6 @@ void p9_idpool_put(int id, struct p9_idpool *p);
728int p9_idpool_check(int id, struct p9_idpool *p); 728int p9_idpool_check(int id, struct p9_idpool *p);
729 729
730int p9_error_init(void); 730int p9_error_init(void);
731int p9_errstr2errno(char *, int);
732int p9_trans_fd_init(void); 731int p9_trans_fd_init(void);
733void p9_trans_fd_exit(void); 732void p9_trans_fd_exit(void);
734#endif /* NET_9P_H */ 733#endif /* NET_9P_H */
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 051a99f79769..d26d5e98a173 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -60,7 +60,7 @@ enum p9_trans_status {
60}; 60};
61 61
62/** 62/**
63 * enum p9_req_status_t - virtio request status 63 * enum p9_req_status_t - status of a request
64 * @REQ_STATUS_IDLE: request slot unused 64 * @REQ_STATUS_IDLE: request slot unused
65 * @REQ_STATUS_ALLOC: request has been allocated but not sent 65 * @REQ_STATUS_ALLOC: request has been allocated but not sent
66 * @REQ_STATUS_UNSENT: request waiting to be sent 66 * @REQ_STATUS_UNSENT: request waiting to be sent
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 8f08c736c4c3..d8549fb9c742 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -41,6 +41,7 @@
41 * @pref: Preferences of this transport 41 * @pref: Preferences of this transport
42 * @def: set if this transport should be considered the default 42 * @def: set if this transport should be considered the default
43 * @create: member function to create a new connection on this transport 43 * @create: member function to create a new connection on this transport
44 * @close: member function to discard a connection on this transport
44 * @request: member function to issue a request to the transport 45 * @request: member function to issue a request to the transport
45 * @cancel: member function to cancel a request (if it hasn't been sent) 46 * @cancel: member function to cancel a request (if it hasn't been sent)
46 * 47 *
@@ -48,7 +49,7 @@
48 * transport module with the 9P core network module and used by the client 49 * transport module with the 9P core network module and used by the client
49 * to instantiate a new connection on a transport. 50 * to instantiate a new connection on a transport.
50 * 51 *
51 * BUGS: the transport module list isn't protected. 52 * The transport module list is protected by v9fs_trans_lock.
52 */ 53 */
53 54
54struct p9_trans_module { 55struct p9_trans_module {
diff --git a/arch/arm/plat-omap/include/plat/panel-generic-dpi.h b/include/video/omap-panel-generic-dpi.h
index 790619734bcd..127e3f20328e 100644
--- a/arch/arm/plat-omap/include/plat/panel-generic-dpi.h
+++ b/include/video/omap-panel-generic-dpi.h
@@ -17,10 +17,10 @@
17 * this program. If not, see <http://www.gnu.org/licenses/>. 17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19 19
20#ifndef __ARCH_ARM_PLAT_OMAP_PANEL_GENERIC_DPI_H 20#ifndef __OMAP_PANEL_GENERIC_DPI_H
21#define __ARCH_ARM_PLAT_OMAP_PANEL_GENERIC_DPI_H 21#define __OMAP_PANEL_GENERIC_DPI_H
22 22
23#include "display.h" 23struct omap_dss_device;
24 24
25/** 25/**
26 * struct panel_generic_dpi_data - panel driver configuration data 26 * struct panel_generic_dpi_data - panel driver configuration data
@@ -34,4 +34,4 @@ struct panel_generic_dpi_data {
34 void (*platform_disable)(struct omap_dss_device *dssdev); 34 void (*platform_disable)(struct omap_dss_device *dssdev);
35}; 35};
36 36
37#endif /* __ARCH_ARM_PLAT_OMAP_PANEL_GENERIC_DPI_H */ 37#endif /* __OMAP_PANEL_GENERIC_DPI_H */
diff --git a/arch/arm/plat-omap/include/plat/nokia-dsi-panel.h b/include/video/omap-panel-nokia-dsi.h
index 01ab6572ccbb..921ae9327228 100644
--- a/arch/arm/plat-omap/include/plat/nokia-dsi-panel.h
+++ b/include/video/omap-panel-nokia-dsi.h
@@ -1,14 +1,15 @@
1#ifndef __ARCH_ARM_PLAT_OMAP_NOKIA_DSI_PANEL_H 1#ifndef __OMAP_NOKIA_DSI_PANEL_H
2#define __ARCH_ARM_PLAT_OMAP_NOKIA_DSI_PANEL_H 2#define __OMAP_NOKIA_DSI_PANEL_H
3 3
4#include "display.h" 4struct omap_dss_device;
5 5
6/** 6/**
7 * struct nokia_dsi_panel_data - Nokia DSI panel driver configuration 7 * struct nokia_dsi_panel_data - Nokia DSI panel driver configuration
8 * @name: panel name 8 * @name: panel name
9 * @use_ext_te: use external TE 9 * @use_ext_te: use external TE
10 * @ext_te_gpio: external TE GPIO 10 * @ext_te_gpio: external TE GPIO
11 * @use_esd_check: perform ESD checks 11 * @esd_interval: interval of ESD checks, 0 = disabled (ms)
12 * @ulps_timeout: time to wait before entering ULPS, 0 = disabled (ms)
12 * @max_backlight_level: maximum backlight level 13 * @max_backlight_level: maximum backlight level
13 * @set_backlight: pointer to backlight set function 14 * @set_backlight: pointer to backlight set function
14 * @get_backlight: pointer to backlight get function 15 * @get_backlight: pointer to backlight get function
@@ -21,11 +22,12 @@ struct nokia_dsi_panel_data {
21 bool use_ext_te; 22 bool use_ext_te;
22 int ext_te_gpio; 23 int ext_te_gpio;
23 24
24 bool use_esd_check; 25 unsigned esd_interval;
26 unsigned ulps_timeout;
25 27
26 int max_backlight_level; 28 int max_backlight_level;
27 int (*set_backlight)(struct omap_dss_device *dssdev, int level); 29 int (*set_backlight)(struct omap_dss_device *dssdev, int level);
28 int (*get_backlight)(struct omap_dss_device *dssdev); 30 int (*get_backlight)(struct omap_dss_device *dssdev);
29}; 31};
30 32
31#endif /* __ARCH_ARM_PLAT_OMAP_NOKIA_DSI_PANEL_H */ 33#endif /* __OMAP_NOKIA_DSI_PANEL_H */
diff --git a/arch/arm/plat-omap/include/plat/display.h b/include/video/omapdss.h
index 5e04ddc18fa8..892b97f8e157 100644
--- a/arch/arm/plat-omap/include/plat/display.h
+++ b/include/video/omapdss.h
@@ -1,6 +1,4 @@
1/* 1/*
2 * linux/include/asm-arm/arch-omap/display.h
3 *
4 * Copyright (C) 2008 Nokia Corporation 2 * Copyright (C) 2008 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> 3 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 * 4 *
@@ -17,8 +15,8 @@
17 * this program. If not, see <http://www.gnu.org/licenses/>. 15 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 16 */
19 17
20#ifndef __ASM_ARCH_OMAP_DISPLAY_H 18#ifndef __OMAP_OMAPDSS_H
21#define __ASM_ARCH_OMAP_DISPLAY_H 19#define __OMAP_OMAPDSS_H
22 20
23#include <linux/list.h> 21#include <linux/list.h>
24#include <linux/kobject.h> 22#include <linux/kobject.h>
@@ -88,6 +86,11 @@ enum omap_color_mode {
88 OMAP_DSS_COLOR_ARGB32 = 1 << 11, /* ARGB32 */ 86 OMAP_DSS_COLOR_ARGB32 = 1 << 11, /* ARGB32 */
89 OMAP_DSS_COLOR_RGBA32 = 1 << 12, /* RGBA32 */ 87 OMAP_DSS_COLOR_RGBA32 = 1 << 12, /* RGBA32 */
90 OMAP_DSS_COLOR_RGBX32 = 1 << 13, /* RGBx32 */ 88 OMAP_DSS_COLOR_RGBX32 = 1 << 13, /* RGBx32 */
89 OMAP_DSS_COLOR_NV12 = 1 << 14, /* NV12 format: YUV 4:2:0 */
90 OMAP_DSS_COLOR_RGBA16 = 1 << 15, /* RGBA16 - 4444 */
91 OMAP_DSS_COLOR_RGBX16 = 1 << 16, /* RGBx16 - 4444 */
92 OMAP_DSS_COLOR_ARGB16_1555 = 1 << 17, /* ARGB16 - 1555 */
93 OMAP_DSS_COLOR_XRGB16_1555 = 1 << 18, /* xRGB16 - 1555 */
91}; 94};
92 95
93enum omap_lcd_display_type { 96enum omap_lcd_display_type {
@@ -174,6 +177,17 @@ enum omap_overlay_manager_caps {
174 OMAP_DSS_OVL_MGR_CAP_DISPC = 1 << 0, 177 OMAP_DSS_OVL_MGR_CAP_DISPC = 1 << 0,
175}; 178};
176 179
180enum omap_dss_clk_source {
181 OMAP_DSS_CLK_SRC_FCK = 0, /* OMAP2/3: DSS1_ALWON_FCLK
182 * OMAP4: DSS_FCLK */
183 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK
184 * OMAP4: PLL1_CLK1 */
185 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK
186 * OMAP4: PLL1_CLK2 */
187 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC, /* OMAP4: PLL2_CLK1 */
188 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI, /* OMAP4: PLL2_CLK2 */
189};
190
177/* RFBI */ 191/* RFBI */
178 192
179struct rfbi_timings { 193struct rfbi_timings {
@@ -205,20 +219,30 @@ int omap_rfbi_enable_te(bool enable, unsigned line);
205int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode, 219int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode,
206 unsigned hs_pulse_time, unsigned vs_pulse_time, 220 unsigned hs_pulse_time, unsigned vs_pulse_time,
207 int hs_pol_inv, int vs_pol_inv, int extif_div); 221 int hs_pol_inv, int vs_pol_inv, int extif_div);
222void rfbi_bus_lock(void);
223void rfbi_bus_unlock(void);
208 224
209/* DSI */ 225/* DSI */
210void dsi_bus_lock(void); 226void dsi_bus_lock(struct omap_dss_device *dssdev);
211void dsi_bus_unlock(void); 227void dsi_bus_unlock(struct omap_dss_device *dssdev);
212int dsi_vc_dcs_write(int channel, u8 *data, int len); 228int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
213int dsi_vc_dcs_write_0(int channel, u8 dcs_cmd); 229 int len);
214int dsi_vc_dcs_write_1(int channel, u8 dcs_cmd, u8 param); 230int dsi_vc_dcs_write_0(struct omap_dss_device *dssdev, int channel,
215int dsi_vc_dcs_write_nosync(int channel, u8 *data, int len); 231 u8 dcs_cmd);
216int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen); 232int dsi_vc_dcs_write_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
217int dsi_vc_dcs_read_1(int channel, u8 dcs_cmd, u8 *data); 233 u8 param);
218int dsi_vc_dcs_read_2(int channel, u8 dcs_cmd, u8 *data1, u8 *data2); 234int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
219int dsi_vc_set_max_rx_packet_size(int channel, u16 len); 235 u8 *data, int len);
220int dsi_vc_send_null(int channel); 236int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
221int dsi_vc_send_bta_sync(int channel); 237 u8 *buf, int buflen);
238int dsi_vc_dcs_read_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
239 u8 *data);
240int dsi_vc_dcs_read_2(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
241 u8 *data1, u8 *data2);
242int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
243 u16 len);
244int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel);
245int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel);
222 246
223/* Board specific data */ 247/* Board specific data */
224struct omap_dss_board_info { 248struct omap_dss_board_info {
@@ -226,6 +250,7 @@ struct omap_dss_board_info {
226 int num_devices; 250 int num_devices;
227 struct omap_dss_device **devices; 251 struct omap_dss_device **devices;
228 struct omap_dss_device *default_device; 252 struct omap_dss_device *default_device;
253 void (*dsi_mux_pads)(bool enable);
229}; 254};
230 255
231#if defined(CONFIG_OMAP2_DSS_MODULE) || defined(CONFIG_OMAP2_DSS) 256#if defined(CONFIG_OMAP2_DSS_MODULE) || defined(CONFIG_OMAP2_DSS)
@@ -280,6 +305,7 @@ struct omap_overlay_info {
280 305
281 u32 paddr; 306 u32 paddr;
282 void __iomem *vaddr; 307 void __iomem *vaddr;
308 u32 p_uv_addr; /* for NV12 format */
283 u16 screen_width; 309 u16 screen_width;
284 u16 width; 310 u16 width;
285 u16 height; 311 u16 height;
@@ -400,18 +426,12 @@ struct omap_dss_device {
400 u8 data1_pol; 426 u8 data1_pol;
401 u8 data2_lane; 427 u8 data2_lane;
402 u8 data2_pol; 428 u8 data2_pol;
429 u8 data3_lane;
430 u8 data3_pol;
431 u8 data4_lane;
432 u8 data4_pol;
403 433
404 struct { 434 int module;
405 u16 regn;
406 u16 regm;
407 u16 regm_dispc;
408 u16 regm_dsi;
409
410 u16 lp_clk_div;
411
412 u16 lck_div;
413 u16 pck_div;
414 } div;
415 435
416 bool ext_te; 436 bool ext_te;
417 u8 ext_te_gpio; 437 u8 ext_te_gpio;
@@ -424,6 +444,33 @@ struct omap_dss_device {
424 } phy; 444 } phy;
425 445
426 struct { 446 struct {
447 struct {
448 struct {
449 u16 lck_div;
450 u16 pck_div;
451 enum omap_dss_clk_source lcd_clk_src;
452 } channel;
453
454 enum omap_dss_clk_source dispc_fclk_src;
455 } dispc;
456
457 struct {
458 u16 regn;
459 u16 regm;
460 u16 regm_dispc;
461 u16 regm_dsi;
462
463 u16 lp_clk_div;
464 enum omap_dss_clk_source dsi_fclk_src;
465 } dsi;
466
467 struct {
468 u16 regn;
469 u16 regm2;
470 } hdmi;
471 } clocks;
472
473 struct {
427 struct omap_video_timings timings; 474 struct omap_video_timings timings;
428 475
429 int acbi; /* ac-bias pin transitions per interrupt */ 476 int acbi; /* ac-bias pin transitions per interrupt */
@@ -503,6 +550,8 @@ struct omap_dss_driver {
503 550
504 void (*get_resolution)(struct omap_dss_device *dssdev, 551 void (*get_resolution)(struct omap_dss_device *dssdev,
505 u16 *xres, u16 *yres); 552 u16 *xres, u16 *yres);
553 void (*get_dimensions)(struct omap_dss_device *dssdev,
554 u32 *width, u32 *height);
506 int (*get_recommended_bpp)(struct omap_dss_device *dssdev); 555 int (*get_recommended_bpp)(struct omap_dss_device *dssdev);
507 556
508 int (*check_timings)(struct omap_dss_device *dssdev, 557 int (*check_timings)(struct omap_dss_device *dssdev,
@@ -519,9 +568,6 @@ struct omap_dss_driver {
519int omap_dss_register_driver(struct omap_dss_driver *); 568int omap_dss_register_driver(struct omap_dss_driver *);
520void omap_dss_unregister_driver(struct omap_dss_driver *); 569void omap_dss_unregister_driver(struct omap_dss_driver *);
521 570
522int omap_dss_register_device(struct omap_dss_device *);
523void omap_dss_unregister_device(struct omap_dss_device *);
524
525void omap_dss_get_device(struct omap_dss_device *dssdev); 571void omap_dss_get_device(struct omap_dss_device *dssdev);
526void omap_dss_put_device(struct omap_dss_device *dssdev); 572void omap_dss_put_device(struct omap_dss_device *dssdev);
527#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL) 573#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL)
@@ -553,7 +599,8 @@ int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
553#define to_dss_driver(x) container_of((x), struct omap_dss_driver, driver) 599#define to_dss_driver(x) container_of((x), struct omap_dss_driver, driver)
554#define to_dss_device(x) container_of((x), struct omap_dss_device, dev) 600#define to_dss_device(x) container_of((x), struct omap_dss_device, dev)
555 601
556void omapdss_dsi_vc_enable_hs(int channel, bool enable); 602void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
603 bool enable);
557int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable); 604int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable);
558 605
559int omap_dsi_prepare_update(struct omap_dss_device *dssdev, 606int omap_dsi_prepare_update(struct omap_dss_device *dssdev,
@@ -568,7 +615,8 @@ int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id);
568void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel); 615void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel);
569 616
570int omapdss_dsi_display_enable(struct omap_dss_device *dssdev); 617int omapdss_dsi_display_enable(struct omap_dss_device *dssdev);
571void omapdss_dsi_display_disable(struct omap_dss_device *dssdev); 618void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
619 bool disconnect_lanes, bool enter_ulps);
572 620
573int omapdss_dpi_display_enable(struct omap_dss_device *dssdev); 621int omapdss_dpi_display_enable(struct omap_dss_device *dssdev);
574void omapdss_dpi_display_disable(struct omap_dss_device *dssdev); 622void omapdss_dpi_display_disable(struct omap_dss_device *dssdev);
@@ -587,5 +635,7 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
587int omap_rfbi_update(struct omap_dss_device *dssdev, 635int omap_rfbi_update(struct omap_dss_device *dssdev,
588 u16 x, u16 y, u16 w, u16 h, 636 u16 x, u16 y, u16 w, u16 h,
589 void (*callback)(void *), void *data); 637 void (*callback)(void *), void *data);
638int omap_rfbi_configure(struct omap_dss_device *dssdev, int pixel_size,
639 int data_lines);
590 640
591#endif 641#endif
diff --git a/include/video/sh_mobile_lcdc.h b/include/video/sh_mobile_lcdc.h
index 2c8d369190b3..d964e68fc61d 100644
--- a/include/video/sh_mobile_lcdc.h
+++ b/include/video/sh_mobile_lcdc.h
@@ -2,6 +2,7 @@
2#define __ASM_SH_MOBILE_LCDC_H__ 2#define __ASM_SH_MOBILE_LCDC_H__
3 3
4#include <linux/fb.h> 4#include <linux/fb.h>
5#include <video/sh_mobile_meram.h>
5 6
6enum { 7enum {
7 RGB8, /* 24bpp, 8:8:8 */ 8 RGB8, /* 24bpp, 8:8:8 */
@@ -87,11 +88,13 @@ struct sh_mobile_lcdc_chan_cfg {
87 struct sh_mobile_lcdc_bl_info bl_info; 88 struct sh_mobile_lcdc_bl_info bl_info;
88 struct sh_mobile_lcdc_sys_bus_cfg sys_bus_cfg; /* only for SYSn I/F */ 89 struct sh_mobile_lcdc_sys_bus_cfg sys_bus_cfg; /* only for SYSn I/F */
89 int nonstd; 90 int nonstd;
91 struct sh_mobile_meram_cfg *meram_cfg;
90}; 92};
91 93
92struct sh_mobile_lcdc_info { 94struct sh_mobile_lcdc_info {
93 int clock_source; 95 int clock_source;
94 struct sh_mobile_lcdc_chan_cfg ch[2]; 96 struct sh_mobile_lcdc_chan_cfg ch[2];
97 struct sh_mobile_meram_info *meram_dev;
95}; 98};
96 99
97#endif /* __ASM_SH_MOBILE_LCDC_H__ */ 100#endif /* __ASM_SH_MOBILE_LCDC_H__ */
diff --git a/include/video/sh_mobile_meram.h b/include/video/sh_mobile_meram.h
new file mode 100644
index 000000000000..af602d602b28
--- /dev/null
+++ b/include/video/sh_mobile_meram.h
@@ -0,0 +1,68 @@
1#ifndef __VIDEO_SH_MOBILE_MERAM_H__
2#define __VIDEO_SH_MOBILE_MERAM_H__
3
4/* For sh_mobile_meram_info.addr_mode */
5enum {
6 SH_MOBILE_MERAM_MODE0 = 0,
7 SH_MOBILE_MERAM_MODE1
8};
9
10enum {
11 SH_MOBILE_MERAM_PF_NV = 0,
12 SH_MOBILE_MERAM_PF_RGB,
13 SH_MOBILE_MERAM_PF_NV24
14};
15
16
17struct sh_mobile_meram_priv;
18struct sh_mobile_meram_ops;
19
20struct sh_mobile_meram_info {
21 int addr_mode;
22 struct sh_mobile_meram_ops *ops;
23 struct sh_mobile_meram_priv *priv;
24 struct platform_device *pdev;
25};
26
27/* icb config */
28struct sh_mobile_meram_icb {
29 int marker_icb; /* ICB # for Marker ICB */
30 int cache_icb; /* ICB # for Cache ICB */
31 int meram_offset; /* MERAM Buffer Offset to use */
32 int meram_size; /* MERAM Buffer Size to use */
33
34 int cache_unit; /* bytes to cache per ICB */
35};
36
37struct sh_mobile_meram_cfg {
38 struct sh_mobile_meram_icb icb[2];
39 int pixelformat;
40 int current_reg;
41};
42
43struct module;
44struct sh_mobile_meram_ops {
45 struct module *module;
46 /* register usage of meram */
47 int (*meram_register)(struct sh_mobile_meram_info *meram_dev,
48 struct sh_mobile_meram_cfg *cfg,
49 int xres, int yres, int pixelformat,
50 unsigned long base_addr_y,
51 unsigned long base_addr_c,
52 unsigned long *icb_addr_y,
53 unsigned long *icb_addr_c, int *pitch);
54
55 /* unregister usage of meram */
56 int (*meram_unregister)(struct sh_mobile_meram_info *meram_dev,
57 struct sh_mobile_meram_cfg *cfg);
58
59 /* update meram settings */
60 int (*meram_update)(struct sh_mobile_meram_info *meram_dev,
61 struct sh_mobile_meram_cfg *cfg,
62 unsigned long base_addr_y,
63 unsigned long base_addr_c,
64 unsigned long *icb_addr_y,
65 unsigned long *icb_addr_c);
66};
67
68#endif /* __VIDEO_SH_MOBILE_MERAM_H__ */
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index 61e523af3c46..3d5d6db864fe 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -45,6 +45,19 @@ typedef uint64_t blkif_sector_t;
45#define BLKIF_OP_WRITE_BARRIER 2 45#define BLKIF_OP_WRITE_BARRIER 2
46 46
47/* 47/*
48 * Recognised if "feature-flush-cache" is present in backend xenbus
49 * info. A flush will ask the underlying storage hardware to flush its
50 * non-volatile caches as appropriate. The "feature-flush-cache" node
51 * contains a boolean indicating whether flush requests are likely to
52 * succeed or fail. Either way, a flush request may fail at any time
53 * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying
54 * block-device hardware. The boolean simply indicates whether or not it
55 * is worthwhile for the frontend to attempt flushes. If a backend does
56 * not recognise BLKIF_OP_WRITE_FLUSH_CACHE, it should *not* create the
57 * "feature-flush-cache" node!
58 */
59#define BLKIF_OP_FLUSH_DISKCACHE 3
60/*
48 * Maximum scatter/gather segments per request. 61 * Maximum scatter/gather segments per request.
49 * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE. 62 * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
50 * NB. This could be 12 if the ring indexes weren't stored in the same page. 63 * NB. This could be 12 if the ring indexes weren't stored in the same page.
diff --git a/init/calibrate.c b/init/calibrate.c
index 76ac9194cbc4..cfd7000c9d71 100644
--- a/init/calibrate.c
+++ b/init/calibrate.c
@@ -38,6 +38,9 @@ static unsigned long __cpuinit calibrate_delay_direct(void)
38 unsigned long timer_rate_min, timer_rate_max; 38 unsigned long timer_rate_min, timer_rate_max;
39 unsigned long good_timer_sum = 0; 39 unsigned long good_timer_sum = 0;
40 unsigned long good_timer_count = 0; 40 unsigned long good_timer_count = 0;
41 unsigned long measured_times[MAX_DIRECT_CALIBRATION_RETRIES];
42 int max = -1; /* index of measured_times with max/min values or not set */
43 int min = -1;
41 int i; 44 int i;
42 45
43 if (read_current_timer(&pre_start) < 0 ) 46 if (read_current_timer(&pre_start) < 0 )
@@ -90,18 +93,78 @@ static unsigned long __cpuinit calibrate_delay_direct(void)
90 * If the upper limit and lower limit of the timer_rate is 93 * If the upper limit and lower limit of the timer_rate is
91 * >= 12.5% apart, redo calibration. 94 * >= 12.5% apart, redo calibration.
92 */ 95 */
93 if (pre_start != 0 && pre_end != 0 && 96 printk(KERN_DEBUG "calibrate_delay_direct() timer_rate_max=%lu "
97 "timer_rate_min=%lu pre_start=%lu pre_end=%lu\n",
98 timer_rate_max, timer_rate_min, pre_start, pre_end);
99 if (start >= post_end)
100 printk(KERN_NOTICE "calibrate_delay_direct() ignoring "
101 "timer_rate as we had a TSC wrap around"
102 " start=%lu >=post_end=%lu\n",
103 start, post_end);
104 if (start < post_end && pre_start != 0 && pre_end != 0 &&
94 (timer_rate_max - timer_rate_min) < (timer_rate_max >> 3)) { 105 (timer_rate_max - timer_rate_min) < (timer_rate_max >> 3)) {
95 good_timer_count++; 106 good_timer_count++;
96 good_timer_sum += timer_rate_max; 107 good_timer_sum += timer_rate_max;
97 } 108 measured_times[i] = timer_rate_max;
109 if (max < 0 || timer_rate_max > measured_times[max])
110 max = i;
111 if (min < 0 || timer_rate_max < measured_times[min])
112 min = i;
113 } else
114 measured_times[i] = 0;
115
98 } 116 }
99 117
100 if (good_timer_count) 118 /*
101 return (good_timer_sum/good_timer_count); 119 * Find the maximum & minimum - if they differ too much throw out the
120 * one with the largest difference from the mean and try again...
121 */
122 while (good_timer_count > 1) {
123 unsigned long estimate;
124 unsigned long maxdiff;
125
126 /* compute the estimate */
127 estimate = (good_timer_sum/good_timer_count);
128 maxdiff = estimate >> 3;
129
130 /* if range is within 12% let's take it */
131 if ((measured_times[max] - measured_times[min]) < maxdiff)
132 return estimate;
133
134 /* ok - drop the worse value and try again... */
135 good_timer_sum = 0;
136 good_timer_count = 0;
137 if ((measured_times[max] - estimate) <
138 (estimate - measured_times[min])) {
139 printk(KERN_NOTICE "calibrate_delay_direct() dropping "
140 "min bogoMips estimate %d = %lu\n",
141 min, measured_times[min]);
142 measured_times[min] = 0;
143 min = max;
144 } else {
145 printk(KERN_NOTICE "calibrate_delay_direct() dropping "
146 "max bogoMips estimate %d = %lu\n",
147 max, measured_times[max]);
148 measured_times[max] = 0;
149 max = min;
150 }
151
152 for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) {
153 if (measured_times[i] == 0)
154 continue;
155 good_timer_count++;
156 good_timer_sum += measured_times[i];
157 if (measured_times[i] < measured_times[min])
158 min = i;
159 if (measured_times[i] > measured_times[max])
160 max = i;
161 }
162
163 }
102 164
103 printk(KERN_WARNING "calibrate_delay_direct() failed to get a good " 165 printk(KERN_NOTICE "calibrate_delay_direct() failed to get a good "
104 "estimate for loops_per_jiffy.\nProbably due to long platform interrupts. Consider using \"lpj=\" boot option.\n"); 166 "estimate for loops_per_jiffy.\nProbably due to long platform "
167 "interrupts. Consider using \"lpj=\" boot option.\n");
105 return 0; 168 return 0;
106} 169}
107#else 170#else
diff --git a/init/main.c b/init/main.c
index 48df882d51d2..d2f1e086bf33 100644
--- a/init/main.c
+++ b/init/main.c
@@ -504,11 +504,14 @@ asmlinkage void __init start_kernel(void)
504 * These use large bootmem allocations and must precede 504 * These use large bootmem allocations and must precede
505 * kmem_cache_init() 505 * kmem_cache_init()
506 */ 506 */
507 setup_log_buf(0);
507 pidhash_init(); 508 pidhash_init();
508 vfs_caches_init_early(); 509 vfs_caches_init_early();
509 sort_main_extable(); 510 sort_main_extable();
510 trap_init(); 511 trap_init();
511 mm_init(); 512 mm_init();
513 BUG_ON(mm_init_cpumask(&init_mm, 0));
514
512 /* 515 /*
513 * Set up the scheduler prior starting any interrupts (such as the 516 * Set up the scheduler prior starting any interrupts (such as the
514 * timer interrupt). Full topology setup happens at smp_init() 517 * timer interrupt). Full topology setup happens at smp_init()
diff --git a/kernel/compat.c b/kernel/compat.c
index 9214dcd087b7..fc9eb093acd5 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -293,6 +293,8 @@ asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
293 return compat_jiffies_to_clock_t(jiffies); 293 return compat_jiffies_to_clock_t(jiffies);
294} 294}
295 295
296#ifdef __ARCH_WANT_SYS_SIGPENDING
297
296/* 298/*
297 * Assumption: old_sigset_t and compat_old_sigset_t are both 299 * Assumption: old_sigset_t and compat_old_sigset_t are both
298 * types that can be passed to put_user()/get_user(). 300 * types that can be passed to put_user()/get_user().
@@ -312,6 +314,10 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
312 return ret; 314 return ret;
313} 315}
314 316
317#endif
318
319#ifdef __ARCH_WANT_SYS_SIGPROCMASK
320
315asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set, 321asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
316 compat_old_sigset_t __user *oset) 322 compat_old_sigset_t __user *oset)
317{ 323{
@@ -333,6 +339,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
333 return ret; 339 return ret;
334} 340}
335 341
342#endif
343
336asmlinkage long compat_sys_setrlimit(unsigned int resource, 344asmlinkage long compat_sys_setrlimit(unsigned int resource,
337 struct compat_rlimit __user *rlim) 345 struct compat_rlimit __user *rlim)
338{ 346{
diff --git a/kernel/fork.c b/kernel/fork.c
index 2b44d82b8237..8e7e135d0817 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -383,15 +383,14 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
383 get_file(file); 383 get_file(file);
384 if (tmp->vm_flags & VM_DENYWRITE) 384 if (tmp->vm_flags & VM_DENYWRITE)
385 atomic_dec(&inode->i_writecount); 385 atomic_dec(&inode->i_writecount);
386 spin_lock(&mapping->i_mmap_lock); 386 mutex_lock(&mapping->i_mmap_mutex);
387 if (tmp->vm_flags & VM_SHARED) 387 if (tmp->vm_flags & VM_SHARED)
388 mapping->i_mmap_writable++; 388 mapping->i_mmap_writable++;
389 tmp->vm_truncate_count = mpnt->vm_truncate_count;
390 flush_dcache_mmap_lock(mapping); 389 flush_dcache_mmap_lock(mapping);
391 /* insert tmp into the share list, just after mpnt */ 390 /* insert tmp into the share list, just after mpnt */
392 vma_prio_tree_add(tmp, mpnt); 391 vma_prio_tree_add(tmp, mpnt);
393 flush_dcache_mmap_unlock(mapping); 392 flush_dcache_mmap_unlock(mapping);
394 spin_unlock(&mapping->i_mmap_lock); 393 mutex_unlock(&mapping->i_mmap_mutex);
395 } 394 }
396 395
397 /* 396 /*
@@ -486,6 +485,20 @@ static void mm_init_aio(struct mm_struct *mm)
486#endif 485#endif
487} 486}
488 487
488int mm_init_cpumask(struct mm_struct *mm, struct mm_struct *oldmm)
489{
490#ifdef CONFIG_CPUMASK_OFFSTACK
491 if (!alloc_cpumask_var(&mm->cpu_vm_mask_var, GFP_KERNEL))
492 return -ENOMEM;
493
494 if (oldmm)
495 cpumask_copy(mm_cpumask(mm), mm_cpumask(oldmm));
496 else
497 memset(mm_cpumask(mm), 0, cpumask_size());
498#endif
499 return 0;
500}
501
489static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) 502static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
490{ 503{
491 atomic_set(&mm->mm_users, 1); 504 atomic_set(&mm->mm_users, 1);
@@ -522,10 +535,20 @@ struct mm_struct * mm_alloc(void)
522 struct mm_struct * mm; 535 struct mm_struct * mm;
523 536
524 mm = allocate_mm(); 537 mm = allocate_mm();
525 if (mm) { 538 if (!mm)
526 memset(mm, 0, sizeof(*mm)); 539 return NULL;
527 mm = mm_init(mm, current); 540
541 memset(mm, 0, sizeof(*mm));
542 mm = mm_init(mm, current);
543 if (!mm)
544 return NULL;
545
546 if (mm_init_cpumask(mm, NULL)) {
547 mm_free_pgd(mm);
548 free_mm(mm);
549 return NULL;
528 } 550 }
551
529 return mm; 552 return mm;
530} 553}
531 554
@@ -537,6 +560,7 @@ struct mm_struct * mm_alloc(void)
537void __mmdrop(struct mm_struct *mm) 560void __mmdrop(struct mm_struct *mm)
538{ 561{
539 BUG_ON(mm == &init_mm); 562 BUG_ON(mm == &init_mm);
563 free_cpumask_var(mm->cpu_vm_mask_var);
540 mm_free_pgd(mm); 564 mm_free_pgd(mm);
541 destroy_context(mm); 565 destroy_context(mm);
542 mmu_notifier_mm_destroy(mm); 566 mmu_notifier_mm_destroy(mm);
@@ -691,6 +715,9 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
691 if (!mm_init(mm, tsk)) 715 if (!mm_init(mm, tsk))
692 goto fail_nomem; 716 goto fail_nomem;
693 717
718 if (mm_init_cpumask(mm, oldmm))
719 goto fail_nocpumask;
720
694 if (init_new_context(tsk, mm)) 721 if (init_new_context(tsk, mm))
695 goto fail_nocontext; 722 goto fail_nocontext;
696 723
@@ -717,6 +744,9 @@ fail_nomem:
717 return NULL; 744 return NULL;
718 745
719fail_nocontext: 746fail_nocontext:
747 free_cpumask_var(mm->cpu_vm_mask_var);
748
749fail_nocpumask:
720 /* 750 /*
721 * If init_new_context() failed, we cannot use mmput() to free the mm 751 * If init_new_context() failed, we cannot use mmput() to free the mm
722 * because it calls destroy_context() 752 * because it calls destroy_context()
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index c541ee527ecb..a9205e32a059 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -748,7 +748,7 @@ static inline void retrigger_next_event(void *arg) { }
748 */ 748 */
749void clock_was_set(void) 749void clock_was_set(void)
750{ 750{
751#ifdef CONFIG_HIGHRES_TIMERS 751#ifdef CONFIG_HIGH_RES_TIMERS
752 /* Retrigger the CPU local events everywhere */ 752 /* Retrigger the CPU local events everywhere */
753 on_each_cpu(retrigger_next_event, NULL, 1); 753 on_each_cpu(retrigger_next_event, NULL, 1);
754#endif 754#endif
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 834899f2500f..64e3df6ab1ef 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -19,7 +19,7 @@ static struct proc_dir_entry *root_irq_dir;
19 19
20#ifdef CONFIG_SMP 20#ifdef CONFIG_SMP
21 21
22static int irq_affinity_proc_show(struct seq_file *m, void *v) 22static int show_irq_affinity(int type, struct seq_file *m, void *v)
23{ 23{
24 struct irq_desc *desc = irq_to_desc((long)m->private); 24 struct irq_desc *desc = irq_to_desc((long)m->private);
25 const struct cpumask *mask = desc->irq_data.affinity; 25 const struct cpumask *mask = desc->irq_data.affinity;
@@ -28,7 +28,10 @@ static int irq_affinity_proc_show(struct seq_file *m, void *v)
28 if (irqd_is_setaffinity_pending(&desc->irq_data)) 28 if (irqd_is_setaffinity_pending(&desc->irq_data))
29 mask = desc->pending_mask; 29 mask = desc->pending_mask;
30#endif 30#endif
31 seq_cpumask(m, mask); 31 if (type)
32 seq_cpumask_list(m, mask);
33 else
34 seq_cpumask(m, mask);
32 seq_putc(m, '\n'); 35 seq_putc(m, '\n');
33 return 0; 36 return 0;
34} 37}
@@ -59,7 +62,18 @@ static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
59#endif 62#endif
60 63
61int no_irq_affinity; 64int no_irq_affinity;
62static ssize_t irq_affinity_proc_write(struct file *file, 65static int irq_affinity_proc_show(struct seq_file *m, void *v)
66{
67 return show_irq_affinity(0, m, v);
68}
69
70static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
71{
72 return show_irq_affinity(1, m, v);
73}
74
75
76static ssize_t write_irq_affinity(int type, struct file *file,
63 const char __user *buffer, size_t count, loff_t *pos) 77 const char __user *buffer, size_t count, loff_t *pos)
64{ 78{
65 unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data; 79 unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data;
@@ -72,7 +86,10 @@ static ssize_t irq_affinity_proc_write(struct file *file,
72 if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) 86 if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
73 return -ENOMEM; 87 return -ENOMEM;
74 88
75 err = cpumask_parse_user(buffer, count, new_value); 89 if (type)
90 err = cpumask_parselist_user(buffer, count, new_value);
91 else
92 err = cpumask_parse_user(buffer, count, new_value);
76 if (err) 93 if (err)
77 goto free_cpumask; 94 goto free_cpumask;
78 95
@@ -100,11 +117,28 @@ free_cpumask:
100 return err; 117 return err;
101} 118}
102 119
120static ssize_t irq_affinity_proc_write(struct file *file,
121 const char __user *buffer, size_t count, loff_t *pos)
122{
123 return write_irq_affinity(0, file, buffer, count, pos);
124}
125
126static ssize_t irq_affinity_list_proc_write(struct file *file,
127 const char __user *buffer, size_t count, loff_t *pos)
128{
129 return write_irq_affinity(1, file, buffer, count, pos);
130}
131
103static int irq_affinity_proc_open(struct inode *inode, struct file *file) 132static int irq_affinity_proc_open(struct inode *inode, struct file *file)
104{ 133{
105 return single_open(file, irq_affinity_proc_show, PDE(inode)->data); 134 return single_open(file, irq_affinity_proc_show, PDE(inode)->data);
106} 135}
107 136
137static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
138{
139 return single_open(file, irq_affinity_list_proc_show, PDE(inode)->data);
140}
141
108static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file) 142static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
109{ 143{
110 return single_open(file, irq_affinity_hint_proc_show, PDE(inode)->data); 144 return single_open(file, irq_affinity_hint_proc_show, PDE(inode)->data);
@@ -125,6 +159,14 @@ static const struct file_operations irq_affinity_hint_proc_fops = {
125 .release = single_release, 159 .release = single_release,
126}; 160};
127 161
162static const struct file_operations irq_affinity_list_proc_fops = {
163 .open = irq_affinity_list_proc_open,
164 .read = seq_read,
165 .llseek = seq_lseek,
166 .release = single_release,
167 .write = irq_affinity_list_proc_write,
168};
169
128static int default_affinity_show(struct seq_file *m, void *v) 170static int default_affinity_show(struct seq_file *m, void *v)
129{ 171{
130 seq_cpumask(m, irq_default_affinity); 172 seq_cpumask(m, irq_default_affinity);
@@ -289,6 +331,10 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
289 proc_create_data("affinity_hint", 0400, desc->dir, 331 proc_create_data("affinity_hint", 0400, desc->dir,
290 &irq_affinity_hint_proc_fops, (void *)(long)irq); 332 &irq_affinity_hint_proc_fops, (void *)(long)irq);
291 333
334 /* create /proc/irq/<irq>/smp_affinity_list */
335 proc_create_data("smp_affinity_list", 0600, desc->dir,
336 &irq_affinity_list_proc_fops, (void *)(long)irq);
337
292 proc_create_data("node", 0444, desc->dir, 338 proc_create_data("node", 0444, desc->dir,
293 &irq_node_proc_fops, (void *)(long)irq); 339 &irq_node_proc_fops, (void *)(long)irq);
294#endif 340#endif
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 2c938e2337cd..d607ed5dd441 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -131,14 +131,14 @@ EXPORT_SYMBOL(mutex_unlock);
131 */ 131 */
132static inline int __sched 132static inline int __sched
133__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, 133__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
134 unsigned long ip) 134 struct lockdep_map *nest_lock, unsigned long ip)
135{ 135{
136 struct task_struct *task = current; 136 struct task_struct *task = current;
137 struct mutex_waiter waiter; 137 struct mutex_waiter waiter;
138 unsigned long flags; 138 unsigned long flags;
139 139
140 preempt_disable(); 140 preempt_disable();
141 mutex_acquire(&lock->dep_map, subclass, 0, ip); 141 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
142 142
143#ifdef CONFIG_MUTEX_SPIN_ON_OWNER 143#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
144 /* 144 /*
@@ -269,16 +269,25 @@ void __sched
269mutex_lock_nested(struct mutex *lock, unsigned int subclass) 269mutex_lock_nested(struct mutex *lock, unsigned int subclass)
270{ 270{
271 might_sleep(); 271 might_sleep();
272 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_); 272 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
273} 273}
274 274
275EXPORT_SYMBOL_GPL(mutex_lock_nested); 275EXPORT_SYMBOL_GPL(mutex_lock_nested);
276 276
277void __sched
278_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
279{
280 might_sleep();
281 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
282}
283
284EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
285
277int __sched 286int __sched
278mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass) 287mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
279{ 288{
280 might_sleep(); 289 might_sleep();
281 return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_); 290 return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
282} 291}
283EXPORT_SYMBOL_GPL(mutex_lock_killable_nested); 292EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
284 293
@@ -287,7 +296,7 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
287{ 296{
288 might_sleep(); 297 might_sleep();
289 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 298 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
290 subclass, _RET_IP_); 299 subclass, NULL, _RET_IP_);
291} 300}
292 301
293EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested); 302EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -393,7 +402,7 @@ __mutex_lock_slowpath(atomic_t *lock_count)
393{ 402{
394 struct mutex *lock = container_of(lock_count, struct mutex, count); 403 struct mutex *lock = container_of(lock_count, struct mutex, count);
395 404
396 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_); 405 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
397} 406}
398 407
399static noinline int __sched 408static noinline int __sched
@@ -401,7 +410,7 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count)
401{ 410{
402 struct mutex *lock = container_of(lock_count, struct mutex, count); 411 struct mutex *lock = container_of(lock_count, struct mutex, count);
403 412
404 return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_); 413 return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
405} 414}
406 415
407static noinline int __sched 416static noinline int __sched
@@ -409,7 +418,7 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
409{ 418{
410 struct mutex *lock = container_of(lock_count, struct mutex, count); 419 struct mutex *lock = container_of(lock_count, struct mutex, count);
411 420
412 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_); 421 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
413} 422}
414#endif 423#endif
415 424
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index a1b5edf1bf92..4556182527f3 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -491,6 +491,13 @@ static struct k_itimer * alloc_posix_timer(void)
491 return tmr; 491 return tmr;
492} 492}
493 493
494static void k_itimer_rcu_free(struct rcu_head *head)
495{
496 struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);
497
498 kmem_cache_free(posix_timers_cache, tmr);
499}
500
494#define IT_ID_SET 1 501#define IT_ID_SET 1
495#define IT_ID_NOT_SET 0 502#define IT_ID_NOT_SET 0
496static void release_posix_timer(struct k_itimer *tmr, int it_id_set) 503static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
@@ -503,7 +510,7 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
503 } 510 }
504 put_pid(tmr->it_pid); 511 put_pid(tmr->it_pid);
505 sigqueue_free(tmr->sigq); 512 sigqueue_free(tmr->sigq);
506 kmem_cache_free(posix_timers_cache, tmr); 513 call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
507} 514}
508 515
509static struct k_clock *clockid_to_kclock(const clockid_t id) 516static struct k_clock *clockid_to_kclock(const clockid_t id)
@@ -631,22 +638,18 @@ out:
631static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags) 638static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
632{ 639{
633 struct k_itimer *timr; 640 struct k_itimer *timr;
634 /* 641
635 * Watch out here. We do a irqsave on the idr_lock and pass the 642 rcu_read_lock();
636 * flags part over to the timer lock. Must not let interrupts in
637 * while we are moving the lock.
638 */
639 spin_lock_irqsave(&idr_lock, *flags);
640 timr = idr_find(&posix_timers_id, (int)timer_id); 643 timr = idr_find(&posix_timers_id, (int)timer_id);
641 if (timr) { 644 if (timr) {
642 spin_lock(&timr->it_lock); 645 spin_lock_irqsave(&timr->it_lock, *flags);
643 if (timr->it_signal == current->signal) { 646 if (timr->it_signal == current->signal) {
644 spin_unlock(&idr_lock); 647 rcu_read_unlock();
645 return timr; 648 return timr;
646 } 649 }
647 spin_unlock(&timr->it_lock); 650 spin_unlock_irqrestore(&timr->it_lock, *flags);
648 } 651 }
649 spin_unlock_irqrestore(&idr_lock, *flags); 652 rcu_read_unlock();
650 653
651 return NULL; 654 return NULL;
652} 655}
diff --git a/kernel/printk.c b/kernel/printk.c
index da8ca817eae3..35185392173f 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -31,6 +31,7 @@
31#include <linux/smp.h> 31#include <linux/smp.h>
32#include <linux/security.h> 32#include <linux/security.h>
33#include <linux/bootmem.h> 33#include <linux/bootmem.h>
34#include <linux/memblock.h>
34#include <linux/syscalls.h> 35#include <linux/syscalls.h>
35#include <linux/kexec.h> 36#include <linux/kexec.h>
36#include <linux/kdb.h> 37#include <linux/kdb.h>
@@ -167,46 +168,74 @@ void log_buf_kexec_setup(void)
167} 168}
168#endif 169#endif
169 170
171/* requested log_buf_len from kernel cmdline */
172static unsigned long __initdata new_log_buf_len;
173
174/* save requested log_buf_len since it's too early to process it */
170static int __init log_buf_len_setup(char *str) 175static int __init log_buf_len_setup(char *str)
171{ 176{
172 unsigned size = memparse(str, &str); 177 unsigned size = memparse(str, &str);
173 unsigned long flags;
174 178
175 if (size) 179 if (size)
176 size = roundup_pow_of_two(size); 180 size = roundup_pow_of_two(size);
177 if (size > log_buf_len) { 181 if (size > log_buf_len)
178 unsigned start, dest_idx, offset; 182 new_log_buf_len = size;
179 char *new_log_buf;
180 183
181 new_log_buf = alloc_bootmem(size); 184 return 0;
182 if (!new_log_buf) { 185}
183 printk(KERN_WARNING "log_buf_len: allocation failed\n"); 186early_param("log_buf_len", log_buf_len_setup);
184 goto out;
185 }
186 187
187 spin_lock_irqsave(&logbuf_lock, flags); 188void __init setup_log_buf(int early)
188 log_buf_len = size; 189{
189 log_buf = new_log_buf; 190 unsigned long flags;
190 191 unsigned start, dest_idx, offset;
191 offset = start = min(con_start, log_start); 192 char *new_log_buf;
192 dest_idx = 0; 193 int free;
193 while (start != log_end) { 194
194 log_buf[dest_idx] = __log_buf[start & (__LOG_BUF_LEN - 1)]; 195 if (!new_log_buf_len)
195 start++; 196 return;
196 dest_idx++; 197
197 } 198 if (early) {
198 log_start -= offset; 199 unsigned long mem;
199 con_start -= offset;
200 log_end -= offset;
201 spin_unlock_irqrestore(&logbuf_lock, flags);
202 200
203 printk(KERN_NOTICE "log_buf_len: %d\n", log_buf_len); 201 mem = memblock_alloc(new_log_buf_len, PAGE_SIZE);
202 if (mem == MEMBLOCK_ERROR)
203 return;
204 new_log_buf = __va(mem);
205 } else {
206 new_log_buf = alloc_bootmem_nopanic(new_log_buf_len);
204 } 207 }
205out:
206 return 1;
207}
208 208
209__setup("log_buf_len=", log_buf_len_setup); 209 if (unlikely(!new_log_buf)) {
210 pr_err("log_buf_len: %ld bytes not available\n",
211 new_log_buf_len);
212 return;
213 }
214
215 spin_lock_irqsave(&logbuf_lock, flags);
216 log_buf_len = new_log_buf_len;
217 log_buf = new_log_buf;
218 new_log_buf_len = 0;
219 free = __LOG_BUF_LEN - log_end;
220
221 offset = start = min(con_start, log_start);
222 dest_idx = 0;
223 while (start != log_end) {
224 unsigned log_idx_mask = start & (__LOG_BUF_LEN - 1);
225
226 log_buf[dest_idx] = __log_buf[log_idx_mask];
227 start++;
228 dest_idx++;
229 }
230 log_start -= offset;
231 con_start -= offset;
232 log_end -= offset;
233 spin_unlock_irqrestore(&logbuf_lock, flags);
234
235 pr_info("log_buf_len: %d\n", log_buf_len);
236 pr_info("early log buf free: %d(%d%%)\n",
237 free, (free * 100) / __LOG_BUF_LEN);
238}
210 239
211#ifdef CONFIG_BOOT_PRINTK_DELAY 240#ifdef CONFIG_BOOT_PRINTK_DELAY
212 241
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4bffd62c2f13..4fc92445a29c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1506,7 +1506,7 @@ static struct ctl_table fs_table[] = {
1506 1506
1507static struct ctl_table debug_table[] = { 1507static struct ctl_table debug_table[] = {
1508#if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) || \ 1508#if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) || \
1509 defined(CONFIG_S390) 1509 defined(CONFIG_S390) || defined(CONFIG_TILE)
1510 { 1510 {
1511 .procname = "exception-trace", 1511 .procname = "exception-trace",
1512 .data = &show_unhandled_signals, 1512 .data = &show_unhandled_signals,
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0efcdca9751a..28afa4c5333c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -670,6 +670,15 @@ config STACKTRACE
670 bool 670 bool
671 depends on STACKTRACE_SUPPORT 671 depends on STACKTRACE_SUPPORT
672 672
673config DEBUG_STACK_USAGE
674 bool "Stack utilization instrumentation"
675 depends on DEBUG_KERNEL
676 help
677 Enables the display of the minimum amount of free stack which each
678 task has ever had available in the sysrq-T and sysrq-P debug output.
679
680 This option will slow down process creation somewhat.
681
673config DEBUG_KOBJECT 682config DEBUG_KOBJECT
674 bool "kobject debugging" 683 bool "kobject debugging"
675 depends on DEBUG_KERNEL 684 depends on DEBUG_KERNEL
@@ -983,6 +992,17 @@ config DEBUG_FORCE_WEAK_PER_CPU
983 To ensure that generic code follows the above rules, this 992 To ensure that generic code follows the above rules, this
984 option forces all percpu variables to be defined as weak. 993 option forces all percpu variables to be defined as weak.
985 994
995config DEBUG_PER_CPU_MAPS
996 bool "Debug access to per_cpu maps"
997 depends on DEBUG_KERNEL
998 depends on SMP
999 help
1000 Say Y to verify that the per_cpu map being accessed has
1001 been set up. This adds a fair amount of code to kernel memory
1002 and decreases performance.
1003
1004 Say N if unsure.
1005
986config LKDTM 1006config LKDTM
987 tristate "Linux Kernel Dump Test Tool Module" 1007 tristate "Linux Kernel Dump Test Tool Module"
988 depends on DEBUG_FS 1008 depends on DEBUG_FS
diff --git a/lib/audit.c b/lib/audit.c
index 8e7dc1c63aa9..76bbed4a20e5 100644
--- a/lib/audit.c
+++ b/lib/audit.c
@@ -36,8 +36,10 @@ int audit_classify_arch(int arch)
36int audit_classify_syscall(int abi, unsigned syscall) 36int audit_classify_syscall(int abi, unsigned syscall)
37{ 37{
38 switch(syscall) { 38 switch(syscall) {
39#ifdef __NR_open
39 case __NR_open: 40 case __NR_open:
40 return 2; 41 return 2;
42#endif
41#ifdef __NR_openat 43#ifdef __NR_openat
42 case __NR_openat: 44 case __NR_openat:
43 return 3; 45 return 3;
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 91e0ccfdb424..41baf02924e6 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -571,8 +571,11 @@ int bitmap_scnlistprintf(char *buf, unsigned int buflen,
571EXPORT_SYMBOL(bitmap_scnlistprintf); 571EXPORT_SYMBOL(bitmap_scnlistprintf);
572 572
573/** 573/**
574 * bitmap_parselist - convert list format ASCII string to bitmap 574 * __bitmap_parselist - convert list format ASCII string to bitmap
575 * @bp: read nul-terminated user string from this buffer 575 * @bp: read nul-terminated user string from this buffer
576 * @buflen: buffer size in bytes. If string is smaller than this
577 * then it must be terminated with a \0.
578 * @is_user: location of buffer, 0 indicates kernel space
576 * @maskp: write resulting mask here 579 * @maskp: write resulting mask here
577 * @nmaskbits: number of bits in mask to be written 580 * @nmaskbits: number of bits in mask to be written
578 * 581 *
@@ -587,20 +590,63 @@ EXPORT_SYMBOL(bitmap_scnlistprintf);
587 * %-EINVAL: invalid character in string 590 * %-EINVAL: invalid character in string
588 * %-ERANGE: bit number specified too large for mask 591 * %-ERANGE: bit number specified too large for mask
589 */ 592 */
590int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits) 593static int __bitmap_parselist(const char *buf, unsigned int buflen,
594 int is_user, unsigned long *maskp,
595 int nmaskbits)
591{ 596{
592 unsigned a, b; 597 unsigned a, b;
598 int c, old_c, totaldigits;
599 const char __user *ubuf = buf;
600 int exp_digit, in_range;
593 601
602 totaldigits = c = 0;
594 bitmap_zero(maskp, nmaskbits); 603 bitmap_zero(maskp, nmaskbits);
595 do { 604 do {
596 if (!isdigit(*bp)) 605 exp_digit = 1;
597 return -EINVAL; 606 in_range = 0;
598 b = a = simple_strtoul(bp, (char **)&bp, BASEDEC); 607 a = b = 0;
599 if (*bp == '-') { 608
600 bp++; 609 /* Get the next cpu# or a range of cpu#'s */
601 if (!isdigit(*bp)) 610 while (buflen) {
611 old_c = c;
612 if (is_user) {
613 if (__get_user(c, ubuf++))
614 return -EFAULT;
615 } else
616 c = *buf++;
617 buflen--;
618 if (isspace(c))
619 continue;
620
621 /*
622 * If the last character was a space and the current
623 * character isn't '\0', we've got embedded whitespace.
624 * This is a no-no, so throw an error.
625 */
626 if (totaldigits && c && isspace(old_c))
627 return -EINVAL;
628
629 /* A '\0' or a ',' signal the end of a cpu# or range */
630 if (c == '\0' || c == ',')
631 break;
632
633 if (c == '-') {
634 if (exp_digit || in_range)
635 return -EINVAL;
636 b = 0;
637 in_range = 1;
638 exp_digit = 1;
639 continue;
640 }
641
642 if (!isdigit(c))
602 return -EINVAL; 643 return -EINVAL;
603 b = simple_strtoul(bp, (char **)&bp, BASEDEC); 644
645 b = b * 10 + (c - '0');
646 if (!in_range)
647 a = b;
648 exp_digit = 0;
649 totaldigits++;
604 } 650 }
605 if (!(a <= b)) 651 if (!(a <= b))
606 return -EINVAL; 652 return -EINVAL;
@@ -610,13 +656,52 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
610 set_bit(a, maskp); 656 set_bit(a, maskp);
611 a++; 657 a++;
612 } 658 }
613 if (*bp == ',') 659 } while (buflen && c == ',');
614 bp++;
615 } while (*bp != '\0' && *bp != '\n');
616 return 0; 660 return 0;
617} 661}
662
663int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
664{
665 char *nl = strchr(bp, '\n');
666 int len;
667
668 if (nl)
669 len = nl - bp;
670 else
671 len = strlen(bp);
672
673 return __bitmap_parselist(bp, len, 0, maskp, nmaskbits);
674}
618EXPORT_SYMBOL(bitmap_parselist); 675EXPORT_SYMBOL(bitmap_parselist);
619 676
677
678/**
679 * bitmap_parselist_user()
680 *
681 * @ubuf: pointer to user buffer containing string.
682 * @ulen: buffer size in bytes. If string is smaller than this
683 * then it must be terminated with a \0.
684 * @maskp: pointer to bitmap array that will contain result.
685 * @nmaskbits: size of bitmap, in bits.
686 *
687 * Wrapper for bitmap_parselist(), providing it with user buffer.
688 *
689 * We cannot have this as an inline function in bitmap.h because it needs
690 * linux/uaccess.h to get the access_ok() declaration and this causes
691 * cyclic dependencies.
692 */
693int bitmap_parselist_user(const char __user *ubuf,
694 unsigned int ulen, unsigned long *maskp,
695 int nmaskbits)
696{
697 if (!access_ok(VERIFY_READ, ubuf, ulen))
698 return -EFAULT;
699 return __bitmap_parselist((const char *)ubuf,
700 ulen, 1, maskp, nmaskbits);
701}
702EXPORT_SYMBOL(bitmap_parselist_user);
703
704
620/** 705/**
621 * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap 706 * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
622 * @buf: pointer to a bitmap 707 * @buf: pointer to a bitmap
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 1923f1490e72..577ddf805975 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -39,17 +39,20 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
39EXPORT_SYMBOL(gen_pool_create); 39EXPORT_SYMBOL(gen_pool_create);
40 40
41/** 41/**
42 * gen_pool_add - add a new chunk of special memory to the pool 42 * gen_pool_add_virt - add a new chunk of special memory to the pool
43 * @pool: pool to add new memory chunk to 43 * @pool: pool to add new memory chunk to
44 * @addr: starting address of memory chunk to add to pool 44 * @virt: virtual starting address of memory chunk to add to pool
45 * @phys: physical starting address of memory chunk to add to pool
45 * @size: size in bytes of the memory chunk to add to pool 46 * @size: size in bytes of the memory chunk to add to pool
46 * @nid: node id of the node the chunk structure and bitmap should be 47 * @nid: node id of the node the chunk structure and bitmap should be
47 * allocated on, or -1 48 * allocated on, or -1
48 * 49 *
49 * Add a new chunk of special memory to the specified pool. 50 * Add a new chunk of special memory to the specified pool.
51 *
52 * Returns 0 on success or a -ve errno on failure.
50 */ 53 */
51int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size, 54int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
52 int nid) 55 size_t size, int nid)
53{ 56{
54 struct gen_pool_chunk *chunk; 57 struct gen_pool_chunk *chunk;
55 int nbits = size >> pool->min_alloc_order; 58 int nbits = size >> pool->min_alloc_order;
@@ -58,11 +61,12 @@ int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
58 61
59 chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid); 62 chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
60 if (unlikely(chunk == NULL)) 63 if (unlikely(chunk == NULL))
61 return -1; 64 return -ENOMEM;
62 65
63 spin_lock_init(&chunk->lock); 66 spin_lock_init(&chunk->lock);
64 chunk->start_addr = addr; 67 chunk->phys_addr = phys;
65 chunk->end_addr = addr + size; 68 chunk->start_addr = virt;
69 chunk->end_addr = virt + size;
66 70
67 write_lock(&pool->lock); 71 write_lock(&pool->lock);
68 list_add(&chunk->next_chunk, &pool->chunks); 72 list_add(&chunk->next_chunk, &pool->chunks);
@@ -70,7 +74,32 @@ int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
70 74
71 return 0; 75 return 0;
72} 76}
73EXPORT_SYMBOL(gen_pool_add); 77EXPORT_SYMBOL(gen_pool_add_virt);
78
79/**
80 * gen_pool_virt_to_phys - return the physical address of memory
81 * @pool: pool to allocate from
82 * @addr: starting address of memory
83 *
84 * Returns the physical address on success, or -1 on error.
85 */
86phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
87{
88 struct list_head *_chunk;
89 struct gen_pool_chunk *chunk;
90
91 read_lock(&pool->lock);
92 list_for_each(_chunk, &pool->chunks) {
93 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
94
95 if (addr >= chunk->start_addr && addr < chunk->end_addr)
96 return chunk->phys_addr + addr - chunk->start_addr;
97 }
98 read_unlock(&pool->lock);
99
100 return -1;
101}
102EXPORT_SYMBOL(gen_pool_virt_to_phys);
74 103
75/** 104/**
76 * gen_pool_destroy - destroy a special memory pool 105 * gen_pool_destroy - destroy a special memory pool
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index a235f3cc471c..2dbae88090ac 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -17,6 +17,7 @@
17#include <linux/math64.h> 17#include <linux/math64.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/types.h> 19#include <linux/types.h>
20#include <asm/uaccess.h>
20 21
21static inline char _tolower(const char c) 22static inline char _tolower(const char c)
22{ 23{
@@ -222,3 +223,28 @@ int kstrtos8(const char *s, unsigned int base, s8 *res)
222 return 0; 223 return 0;
223} 224}
224EXPORT_SYMBOL(kstrtos8); 225EXPORT_SYMBOL(kstrtos8);
226
227#define kstrto_from_user(f, g, type) \
228int f(const char __user *s, size_t count, unsigned int base, type *res) \
229{ \
230 /* sign, base 2 representation, newline, terminator */ \
231 char buf[1 + sizeof(type) * 8 + 1 + 1]; \
232 \
233 count = min(count, sizeof(buf) - 1); \
234 if (copy_from_user(buf, s, count)) \
235 return -EFAULT; \
236 buf[count] = '\0'; \
237 return g(buf, base, res); \
238} \
239EXPORT_SYMBOL(f)
240
241kstrto_from_user(kstrtoull_from_user, kstrtoull, unsigned long long);
242kstrto_from_user(kstrtoll_from_user, kstrtoll, long long);
243kstrto_from_user(kstrtoul_from_user, kstrtoul, unsigned long);
244kstrto_from_user(kstrtol_from_user, kstrtol, long);
245kstrto_from_user(kstrtouint_from_user, kstrtouint, unsigned int);
246kstrto_from_user(kstrtoint_from_user, kstrtoint, int);
247kstrto_from_user(kstrtou16_from_user, kstrtou16, u16);
248kstrto_from_user(kstrtos16_from_user, kstrtos16, s16);
249kstrto_from_user(kstrtou8_from_user, kstrtou8, u8);
250kstrto_from_user(kstrtos8_from_user, kstrtos8, s8);
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index 270de9d31b8c..a07e7268d7ed 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -84,7 +84,7 @@ struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
84 if (e_count > LC_MAX_ACTIVE) 84 if (e_count > LC_MAX_ACTIVE)
85 return NULL; 85 return NULL;
86 86
87 slot = kzalloc(e_count * sizeof(struct hlist_head*), GFP_KERNEL); 87 slot = kcalloc(e_count, sizeof(struct hlist_head), GFP_KERNEL);
88 if (!slot) 88 if (!slot)
89 goto out_fail; 89 goto out_fail;
90 element = kzalloc(e_count * sizeof(struct lc_element *), GFP_KERNEL); 90 element = kzalloc(e_count * sizeof(struct lc_element *), GFP_KERNEL);
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 90cbe4bb5960..4407f8c9b1f7 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -16,7 +16,7 @@ void show_mem(unsigned int filter)
16 nonshared = 0, highmem = 0; 16 nonshared = 0, highmem = 0;
17 17
18 printk("Mem-Info:\n"); 18 printk("Mem-Info:\n");
19 __show_free_areas(filter); 19 show_free_areas(filter);
20 20
21 for_each_online_pgdat(pgdat) { 21 for_each_online_pgdat(pgdat) {
22 unsigned long i, flags; 22 unsigned long i, flags;
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 1d659d7bb0f8..c11205688fb4 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -898,7 +898,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
898 case 'U': 898 case 'U':
899 return uuid_string(buf, end, ptr, spec, fmt); 899 return uuid_string(buf, end, ptr, spec, fmt);
900 case 'V': 900 case 'V':
901 return buf + vsnprintf(buf, end - buf, 901 return buf + vsnprintf(buf, end > buf ? end - buf : 0,
902 ((struct va_format *)ptr)->fmt, 902 ((struct va_format *)ptr)->fmt,
903 *(((struct va_format *)ptr)->va)); 903 *(((struct va_format *)ptr)->va));
904 case 'K': 904 case 'K':
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index befc87531e4f..f032e6e1e09a 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -63,10 +63,10 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
63 unsigned long background_thresh; 63 unsigned long background_thresh;
64 unsigned long dirty_thresh; 64 unsigned long dirty_thresh;
65 unsigned long bdi_thresh; 65 unsigned long bdi_thresh;
66 unsigned long nr_dirty, nr_io, nr_more_io, nr_wb; 66 unsigned long nr_dirty, nr_io, nr_more_io;
67 struct inode *inode; 67 struct inode *inode;
68 68
69 nr_wb = nr_dirty = nr_io = nr_more_io = 0; 69 nr_dirty = nr_io = nr_more_io = 0;
70 spin_lock(&inode_wb_list_lock); 70 spin_lock(&inode_wb_list_lock);
71 list_for_each_entry(inode, &wb->b_dirty, i_wb_list) 71 list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
72 nr_dirty++; 72 nr_dirty++;
diff --git a/mm/filemap.c b/mm/filemap.c
index c641edf553a9..68e782b3d3de 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -58,16 +58,16 @@
58/* 58/*
59 * Lock ordering: 59 * Lock ordering:
60 * 60 *
61 * ->i_mmap_lock (truncate_pagecache) 61 * ->i_mmap_mutex (truncate_pagecache)
62 * ->private_lock (__free_pte->__set_page_dirty_buffers) 62 * ->private_lock (__free_pte->__set_page_dirty_buffers)
63 * ->swap_lock (exclusive_swap_page, others) 63 * ->swap_lock (exclusive_swap_page, others)
64 * ->mapping->tree_lock 64 * ->mapping->tree_lock
65 * 65 *
66 * ->i_mutex 66 * ->i_mutex
67 * ->i_mmap_lock (truncate->unmap_mapping_range) 67 * ->i_mmap_mutex (truncate->unmap_mapping_range)
68 * 68 *
69 * ->mmap_sem 69 * ->mmap_sem
70 * ->i_mmap_lock 70 * ->i_mmap_mutex
71 * ->page_table_lock or pte_lock (various, mainly in memory.c) 71 * ->page_table_lock or pte_lock (various, mainly in memory.c)
72 * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock) 72 * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock)
73 * 73 *
@@ -84,7 +84,7 @@
84 * sb_lock (fs/fs-writeback.c) 84 * sb_lock (fs/fs-writeback.c)
85 * ->mapping->tree_lock (__sync_single_inode) 85 * ->mapping->tree_lock (__sync_single_inode)
86 * 86 *
87 * ->i_mmap_lock 87 * ->i_mmap_mutex
88 * ->anon_vma.lock (vma_adjust) 88 * ->anon_vma.lock (vma_adjust)
89 * 89 *
90 * ->anon_vma.lock 90 * ->anon_vma.lock
@@ -106,7 +106,7 @@
106 * 106 *
107 * (code doesn't rely on that order, so you could switch it around) 107 * (code doesn't rely on that order, so you could switch it around)
108 * ->tasklist_lock (memory_failure, collect_procs_ao) 108 * ->tasklist_lock (memory_failure, collect_procs_ao)
109 * ->i_mmap_lock 109 * ->i_mmap_mutex
110 */ 110 */
111 111
112/* 112/*
@@ -562,6 +562,17 @@ void wait_on_page_bit(struct page *page, int bit_nr)
562} 562}
563EXPORT_SYMBOL(wait_on_page_bit); 563EXPORT_SYMBOL(wait_on_page_bit);
564 564
565int wait_on_page_bit_killable(struct page *page, int bit_nr)
566{
567 DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
568
569 if (!test_bit(bit_nr, &page->flags))
570 return 0;
571
572 return __wait_on_bit(page_waitqueue(page), &wait,
573 sleep_on_page_killable, TASK_KILLABLE);
574}
575
565/** 576/**
566 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue 577 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
567 * @page: Page defining the wait queue of interest 578 * @page: Page defining the wait queue of interest
@@ -643,15 +654,32 @@ EXPORT_SYMBOL_GPL(__lock_page_killable);
643int __lock_page_or_retry(struct page *page, struct mm_struct *mm, 654int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
644 unsigned int flags) 655 unsigned int flags)
645{ 656{
646 if (!(flags & FAULT_FLAG_ALLOW_RETRY)) { 657 if (flags & FAULT_FLAG_ALLOW_RETRY) {
647 __lock_page(page); 658 /*
648 return 1; 659 * CAUTION! In this case, mmap_sem is not released
649 } else { 660 * even though return 0.
650 if (!(flags & FAULT_FLAG_RETRY_NOWAIT)) { 661 */
651 up_read(&mm->mmap_sem); 662 if (flags & FAULT_FLAG_RETRY_NOWAIT)
663 return 0;
664
665 up_read(&mm->mmap_sem);
666 if (flags & FAULT_FLAG_KILLABLE)
667 wait_on_page_locked_killable(page);
668 else
652 wait_on_page_locked(page); 669 wait_on_page_locked(page);
653 }
654 return 0; 670 return 0;
671 } else {
672 if (flags & FAULT_FLAG_KILLABLE) {
673 int ret;
674
675 ret = __lock_page_killable(page);
676 if (ret) {
677 up_read(&mm->mmap_sem);
678 return 0;
679 }
680 } else
681 __lock_page(page);
682 return 1;
655 } 683 }
656} 684}
657 685
@@ -1528,15 +1556,17 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
1528 /* If we don't want any read-ahead, don't bother */ 1556 /* If we don't want any read-ahead, don't bother */
1529 if (VM_RandomReadHint(vma)) 1557 if (VM_RandomReadHint(vma))
1530 return; 1558 return;
1559 if (!ra->ra_pages)
1560 return;
1531 1561
1532 if (VM_SequentialReadHint(vma) || 1562 if (VM_SequentialReadHint(vma)) {
1533 offset - 1 == (ra->prev_pos >> PAGE_CACHE_SHIFT)) {
1534 page_cache_sync_readahead(mapping, ra, file, offset, 1563 page_cache_sync_readahead(mapping, ra, file, offset,
1535 ra->ra_pages); 1564 ra->ra_pages);
1536 return; 1565 return;
1537 } 1566 }
1538 1567
1539 if (ra->mmap_miss < INT_MAX) 1568 /* Avoid banging the cache line if not needed */
1569 if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
1540 ra->mmap_miss++; 1570 ra->mmap_miss++;
1541 1571
1542 /* 1572 /*
@@ -1550,12 +1580,10 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
1550 * mmap read-around 1580 * mmap read-around
1551 */ 1581 */
1552 ra_pages = max_sane_readahead(ra->ra_pages); 1582 ra_pages = max_sane_readahead(ra->ra_pages);
1553 if (ra_pages) { 1583 ra->start = max_t(long, 0, offset - ra_pages / 2);
1554 ra->start = max_t(long, 0, offset - ra_pages/2); 1584 ra->size = ra_pages;
1555 ra->size = ra_pages; 1585 ra->async_size = ra_pages / 4;
1556 ra->async_size = 0; 1586 ra_submit(ra, mapping, file);
1557 ra_submit(ra, mapping, file);
1558 }
1559} 1587}
1560 1588
1561/* 1589/*
@@ -1660,7 +1688,6 @@ retry_find:
1660 return VM_FAULT_SIGBUS; 1688 return VM_FAULT_SIGBUS;
1661 } 1689 }
1662 1690
1663 ra->prev_pos = (loff_t)offset << PAGE_CACHE_SHIFT;
1664 vmf->page = page; 1691 vmf->page = page;
1665 return ret | VM_FAULT_LOCKED; 1692 return ret | VM_FAULT_LOCKED;
1666 1693
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 83364df74a33..93356cd12828 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -183,7 +183,7 @@ __xip_unmap (struct address_space * mapping,
183 return; 183 return;
184 184
185retry: 185retry:
186 spin_lock(&mapping->i_mmap_lock); 186 mutex_lock(&mapping->i_mmap_mutex);
187 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 187 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
188 mm = vma->vm_mm; 188 mm = vma->vm_mm;
189 address = vma->vm_start + 189 address = vma->vm_start +
@@ -201,7 +201,7 @@ retry:
201 page_cache_release(page); 201 page_cache_release(page);
202 } 202 }
203 } 203 }
204 spin_unlock(&mapping->i_mmap_lock); 204 mutex_unlock(&mapping->i_mmap_mutex);
205 205
206 if (locked) { 206 if (locked) {
207 mutex_unlock(&xip_sparse_mutex); 207 mutex_unlock(&xip_sparse_mutex);
diff --git a/mm/fremap.c b/mm/fremap.c
index ec520c7b28df..7f4123056e06 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -211,13 +211,13 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
211 } 211 }
212 goto out; 212 goto out;
213 } 213 }
214 spin_lock(&mapping->i_mmap_lock); 214 mutex_lock(&mapping->i_mmap_mutex);
215 flush_dcache_mmap_lock(mapping); 215 flush_dcache_mmap_lock(mapping);
216 vma->vm_flags |= VM_NONLINEAR; 216 vma->vm_flags |= VM_NONLINEAR;
217 vma_prio_tree_remove(vma, &mapping->i_mmap); 217 vma_prio_tree_remove(vma, &mapping->i_mmap);
218 vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear); 218 vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
219 flush_dcache_mmap_unlock(mapping); 219 flush_dcache_mmap_unlock(mapping);
220 spin_unlock(&mapping->i_mmap_lock); 220 mutex_unlock(&mapping->i_mmap_mutex);
221 } 221 }
222 222
223 if (vma->vm_flags & VM_LOCKED) { 223 if (vma->vm_flags & VM_LOCKED) {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 83326ad66d9b..615d9743a3cb 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1139,7 +1139,7 @@ static int __split_huge_page_splitting(struct page *page,
1139 * We can't temporarily set the pmd to null in order 1139 * We can't temporarily set the pmd to null in order
1140 * to split it, the pmd must remain marked huge at all 1140 * to split it, the pmd must remain marked huge at all
1141 * times or the VM won't take the pmd_trans_huge paths 1141 * times or the VM won't take the pmd_trans_huge paths
1142 * and it won't wait on the anon_vma->root->lock to 1142 * and it won't wait on the anon_vma->root->mutex to
1143 * serialize against split_huge_page*. 1143 * serialize against split_huge_page*.
1144 */ 1144 */
1145 pmdp_splitting_flush_notify(vma, address, pmd); 1145 pmdp_splitting_flush_notify(vma, address, pmd);
@@ -1333,7 +1333,7 @@ static int __split_huge_page_map(struct page *page,
1333 return ret; 1333 return ret;
1334} 1334}
1335 1335
1336/* must be called with anon_vma->root->lock hold */ 1336/* must be called with anon_vma->root->mutex hold */
1337static void __split_huge_page(struct page *page, 1337static void __split_huge_page(struct page *page,
1338 struct anon_vma *anon_vma) 1338 struct anon_vma *anon_vma)
1339{ 1339{
@@ -1771,12 +1771,9 @@ static void collapse_huge_page(struct mm_struct *mm,
1771 1771
1772 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 1772 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1773#ifndef CONFIG_NUMA 1773#ifndef CONFIG_NUMA
1774 up_read(&mm->mmap_sem);
1774 VM_BUG_ON(!*hpage); 1775 VM_BUG_ON(!*hpage);
1775 new_page = *hpage; 1776 new_page = *hpage;
1776 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
1777 up_read(&mm->mmap_sem);
1778 return;
1779 }
1780#else 1777#else
1781 VM_BUG_ON(*hpage); 1778 VM_BUG_ON(*hpage);
1782 /* 1779 /*
@@ -1791,22 +1788,26 @@ static void collapse_huge_page(struct mm_struct *mm,
1791 */ 1788 */
1792 new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address, 1789 new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
1793 node, __GFP_OTHER_NODE); 1790 node, __GFP_OTHER_NODE);
1791
1792 /*
1793 * After allocating the hugepage, release the mmap_sem read lock in
1794 * preparation for taking it in write mode.
1795 */
1796 up_read(&mm->mmap_sem);
1794 if (unlikely(!new_page)) { 1797 if (unlikely(!new_page)) {
1795 up_read(&mm->mmap_sem);
1796 count_vm_event(THP_COLLAPSE_ALLOC_FAILED); 1798 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
1797 *hpage = ERR_PTR(-ENOMEM); 1799 *hpage = ERR_PTR(-ENOMEM);
1798 return; 1800 return;
1799 } 1801 }
1802#endif
1803
1800 count_vm_event(THP_COLLAPSE_ALLOC); 1804 count_vm_event(THP_COLLAPSE_ALLOC);
1801 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { 1805 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
1802 up_read(&mm->mmap_sem); 1806#ifdef CONFIG_NUMA
1803 put_page(new_page); 1807 put_page(new_page);
1808#endif
1804 return; 1809 return;
1805 } 1810 }
1806#endif
1807
1808 /* after allocating the hugepage upgrade to mmap_sem write mode */
1809 up_read(&mm->mmap_sem);
1810 1811
1811 /* 1812 /*
1812 * Prevent all access to pagetables with the exception of 1813 * Prevent all access to pagetables with the exception of
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bbb4a5bbb958..5fd68b95c671 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2205,7 +2205,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2205 unsigned long sz = huge_page_size(h); 2205 unsigned long sz = huge_page_size(h);
2206 2206
2207 /* 2207 /*
2208 * A page gathering list, protected by per file i_mmap_lock. The 2208 * A page gathering list, protected by per file i_mmap_mutex. The
2209 * lock is used to avoid list corruption from multiple unmapping 2209 * lock is used to avoid list corruption from multiple unmapping
2210 * of the same page since we are using page->lru. 2210 * of the same page since we are using page->lru.
2211 */ 2211 */
@@ -2274,9 +2274,9 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2274void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 2274void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2275 unsigned long end, struct page *ref_page) 2275 unsigned long end, struct page *ref_page)
2276{ 2276{
2277 spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); 2277 mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
2278 __unmap_hugepage_range(vma, start, end, ref_page); 2278 __unmap_hugepage_range(vma, start, end, ref_page);
2279 spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); 2279 mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
2280} 2280}
2281 2281
2282/* 2282/*
@@ -2308,7 +2308,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2308 * this mapping should be shared between all the VMAs, 2308 * this mapping should be shared between all the VMAs,
2309 * __unmap_hugepage_range() is called as the lock is already held 2309 * __unmap_hugepage_range() is called as the lock is already held
2310 */ 2310 */
2311 spin_lock(&mapping->i_mmap_lock); 2311 mutex_lock(&mapping->i_mmap_mutex);
2312 vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 2312 vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
2313 /* Do not unmap the current VMA */ 2313 /* Do not unmap the current VMA */
2314 if (iter_vma == vma) 2314 if (iter_vma == vma)
@@ -2326,7 +2326,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2326 address, address + huge_page_size(h), 2326 address, address + huge_page_size(h),
2327 page); 2327 page);
2328 } 2328 }
2329 spin_unlock(&mapping->i_mmap_lock); 2329 mutex_unlock(&mapping->i_mmap_mutex);
2330 2330
2331 return 1; 2331 return 1;
2332} 2332}
@@ -2810,7 +2810,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
2810 BUG_ON(address >= end); 2810 BUG_ON(address >= end);
2811 flush_cache_range(vma, address, end); 2811 flush_cache_range(vma, address, end);
2812 2812
2813 spin_lock(&vma->vm_file->f_mapping->i_mmap_lock); 2813 mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
2814 spin_lock(&mm->page_table_lock); 2814 spin_lock(&mm->page_table_lock);
2815 for (; address < end; address += huge_page_size(h)) { 2815 for (; address < end; address += huge_page_size(h)) {
2816 ptep = huge_pte_offset(mm, address); 2816 ptep = huge_pte_offset(mm, address);
@@ -2825,7 +2825,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
2825 } 2825 }
2826 } 2826 }
2827 spin_unlock(&mm->page_table_lock); 2827 spin_unlock(&mm->page_table_lock);
2828 spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock); 2828 mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
2829 2829
2830 flush_tlb_range(vma, start, end); 2830 flush_tlb_range(vma, start, end);
2831} 2831}
diff --git a/mm/init-mm.c b/mm/init-mm.c
index 1d29cdfe8ebb..4019979b2637 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -21,6 +21,5 @@ struct mm_struct init_mm = {
21 .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem), 21 .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem),
22 .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock), 22 .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
23 .mmlist = LIST_HEAD_INIT(init_mm.mmlist), 23 .mmlist = LIST_HEAD_INIT(init_mm.mmlist),
24 .cpu_vm_mask = CPU_MASK_ALL,
25 INIT_MM_CONTEXT(init_mm) 24 INIT_MM_CONTEXT(init_mm)
26}; 25};
diff --git a/mm/internal.h b/mm/internal.h
index 9d0ced8e505e..d071d380fb49 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -66,6 +66,10 @@ static inline unsigned long page_order(struct page *page)
66 return page_private(page); 66 return page_private(page);
67} 67}
68 68
69/* mm/util.c */
70void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
71 struct vm_area_struct *prev, struct rb_node *rb_parent);
72
69#ifdef CONFIG_MMU 73#ifdef CONFIG_MMU
70extern long mlock_vma_pages_range(struct vm_area_struct *vma, 74extern long mlock_vma_pages_range(struct vm_area_struct *vma,
71 unsigned long start, unsigned long end); 75 unsigned long start, unsigned long end);
diff --git a/mm/ksm.c b/mm/ksm.c
index 942dfc73a2ff..d708b3ef2260 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -35,6 +35,7 @@
35#include <linux/ksm.h> 35#include <linux/ksm.h>
36#include <linux/hash.h> 36#include <linux/hash.h>
37#include <linux/freezer.h> 37#include <linux/freezer.h>
38#include <linux/oom.h>
38 39
39#include <asm/tlbflush.h> 40#include <asm/tlbflush.h>
40#include "internal.h" 41#include "internal.h"
@@ -1894,9 +1895,11 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
1894 if (ksm_run != flags) { 1895 if (ksm_run != flags) {
1895 ksm_run = flags; 1896 ksm_run = flags;
1896 if (flags & KSM_RUN_UNMERGE) { 1897 if (flags & KSM_RUN_UNMERGE) {
1897 current->flags |= PF_OOM_ORIGIN; 1898 int oom_score_adj;
1899
1900 oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
1898 err = unmerge_and_remove_all_rmap_items(); 1901 err = unmerge_and_remove_all_rmap_items();
1899 current->flags &= ~PF_OOM_ORIGIN; 1902 test_set_oom_score_adj(oom_score_adj);
1900 if (err) { 1903 if (err) {
1901 ksm_run = KSM_RUN_STOP; 1904 ksm_run = KSM_RUN_STOP;
1902 count = err; 1905 count = err;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 010f9166fa6e..d5fd3dcd3f2e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5169,19 +5169,12 @@ struct cgroup_subsys mem_cgroup_subsys = {
5169static int __init enable_swap_account(char *s) 5169static int __init enable_swap_account(char *s)
5170{ 5170{
5171 /* consider enabled if no parameter or 1 is given */ 5171 /* consider enabled if no parameter or 1 is given */
5172 if (!(*s) || !strcmp(s, "=1")) 5172 if (!strcmp(s, "1"))
5173 really_do_swap_account = 1; 5173 really_do_swap_account = 1;
5174 else if (!strcmp(s, "=0")) 5174 else if (!strcmp(s, "0"))
5175 really_do_swap_account = 0; 5175 really_do_swap_account = 0;
5176 return 1; 5176 return 1;
5177} 5177}
5178__setup("swapaccount", enable_swap_account); 5178__setup("swapaccount=", enable_swap_account);
5179 5179
5180static int __init disable_swap_account(char *s)
5181{
5182 printk_once("noswapaccount is deprecated and will be removed in 2.6.40. Use swapaccount=0 instead\n");
5183 enable_swap_account("=0");
5184 return 1;
5185}
5186__setup("noswapaccount", disable_swap_account);
5187#endif 5180#endif
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 2b9a5eef39e0..5c8f7e08928d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -239,7 +239,11 @@ void shake_page(struct page *p, int access)
239 if (access) { 239 if (access) {
240 int nr; 240 int nr;
241 do { 241 do {
242 nr = shrink_slab(1000, GFP_KERNEL, 1000); 242 struct shrink_control shrink = {
243 .gfp_mask = GFP_KERNEL,
244 };
245
246 nr = shrink_slab(&shrink, 1000, 1000);
243 if (page_count(p) == 1) 247 if (page_count(p) == 1)
244 break; 248 break;
245 } while (nr > 10); 249 } while (nr > 10);
@@ -429,7 +433,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
429 */ 433 */
430 434
431 read_lock(&tasklist_lock); 435 read_lock(&tasklist_lock);
432 spin_lock(&mapping->i_mmap_lock); 436 mutex_lock(&mapping->i_mmap_mutex);
433 for_each_process(tsk) { 437 for_each_process(tsk) {
434 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 438 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
435 439
@@ -449,7 +453,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
449 add_to_kill(tsk, page, vma, to_kill, tkc); 453 add_to_kill(tsk, page, vma, to_kill, tkc);
450 } 454 }
451 } 455 }
452 spin_unlock(&mapping->i_mmap_lock); 456 mutex_unlock(&mapping->i_mmap_mutex);
453 read_unlock(&tasklist_lock); 457 read_unlock(&tasklist_lock);
454} 458}
455 459
@@ -1440,16 +1444,12 @@ int soft_offline_page(struct page *page, int flags)
1440 */ 1444 */
1441 ret = invalidate_inode_page(page); 1445 ret = invalidate_inode_page(page);
1442 unlock_page(page); 1446 unlock_page(page);
1443
1444 /* 1447 /*
1445 * Drop count because page migration doesn't like raised
1446 * counts. The page could get re-allocated, but if it becomes
1447 * LRU the isolation will just fail.
1448 * RED-PEN would be better to keep it isolated here, but we 1448 * RED-PEN would be better to keep it isolated here, but we
1449 * would need to fix isolation locking first. 1449 * would need to fix isolation locking first.
1450 */ 1450 */
1451 put_page(page);
1452 if (ret == 1) { 1451 if (ret == 1) {
1452 put_page(page);
1453 ret = 0; 1453 ret = 0;
1454 pr_info("soft_offline: %#lx: invalidated\n", pfn); 1454 pr_info("soft_offline: %#lx: invalidated\n", pfn);
1455 goto done; 1455 goto done;
@@ -1461,6 +1461,11 @@ int soft_offline_page(struct page *page, int flags)
1461 * handles a large number of cases for us. 1461 * handles a large number of cases for us.
1462 */ 1462 */
1463 ret = isolate_lru_page(page); 1463 ret = isolate_lru_page(page);
1464 /*
1465 * Drop page reference which is came from get_any_page()
1466 * successful isolate_lru_page() already took another one.
1467 */
1468 put_page(page);
1464 if (!ret) { 1469 if (!ret) {
1465 LIST_HEAD(pagelist); 1470 LIST_HEAD(pagelist);
1466 1471
diff --git a/mm/memory.c b/mm/memory.c
index 61e66f026563..b73f677f0bb1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -182,7 +182,7 @@ void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
182{ 182{
183 __sync_task_rss_stat(task, mm); 183 __sync_task_rss_stat(task, mm);
184} 184}
185#else 185#else /* SPLIT_RSS_COUNTING */
186 186
187#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member) 187#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
188#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member) 188#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
@@ -191,8 +191,205 @@ static void check_sync_rss_stat(struct task_struct *task)
191{ 191{
192} 192}
193 193
194#endif /* SPLIT_RSS_COUNTING */
195
196#ifdef HAVE_GENERIC_MMU_GATHER
197
198static int tlb_next_batch(struct mmu_gather *tlb)
199{
200 struct mmu_gather_batch *batch;
201
202 batch = tlb->active;
203 if (batch->next) {
204 tlb->active = batch->next;
205 return 1;
206 }
207
208 batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
209 if (!batch)
210 return 0;
211
212 batch->next = NULL;
213 batch->nr = 0;
214 batch->max = MAX_GATHER_BATCH;
215
216 tlb->active->next = batch;
217 tlb->active = batch;
218
219 return 1;
220}
221
222/* tlb_gather_mmu
223 * Called to initialize an (on-stack) mmu_gather structure for page-table
224 * tear-down from @mm. The @fullmm argument is used when @mm is without
225 * users and we're going to destroy the full address space (exit/execve).
226 */
227void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
228{
229 tlb->mm = mm;
230
231 tlb->fullmm = fullmm;
232 tlb->need_flush = 0;
233 tlb->fast_mode = (num_possible_cpus() == 1);
234 tlb->local.next = NULL;
235 tlb->local.nr = 0;
236 tlb->local.max = ARRAY_SIZE(tlb->__pages);
237 tlb->active = &tlb->local;
238
239#ifdef CONFIG_HAVE_RCU_TABLE_FREE
240 tlb->batch = NULL;
241#endif
242}
243
244void tlb_flush_mmu(struct mmu_gather *tlb)
245{
246 struct mmu_gather_batch *batch;
247
248 if (!tlb->need_flush)
249 return;
250 tlb->need_flush = 0;
251 tlb_flush(tlb);
252#ifdef CONFIG_HAVE_RCU_TABLE_FREE
253 tlb_table_flush(tlb);
194#endif 254#endif
195 255
256 if (tlb_fast_mode(tlb))
257 return;
258
259 for (batch = &tlb->local; batch; batch = batch->next) {
260 free_pages_and_swap_cache(batch->pages, batch->nr);
261 batch->nr = 0;
262 }
263 tlb->active = &tlb->local;
264}
265
266/* tlb_finish_mmu
267 * Called at the end of the shootdown operation to free up any resources
268 * that were required.
269 */
270void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
271{
272 struct mmu_gather_batch *batch, *next;
273
274 tlb_flush_mmu(tlb);
275
276 /* keep the page table cache within bounds */
277 check_pgt_cache();
278
279 for (batch = tlb->local.next; batch; batch = next) {
280 next = batch->next;
281 free_pages((unsigned long)batch, 0);
282 }
283 tlb->local.next = NULL;
284}
285
286/* __tlb_remove_page
287 * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
288 * handling the additional races in SMP caused by other CPUs caching valid
289 * mappings in their TLBs. Returns the number of free page slots left.
290 * When out of page slots we must call tlb_flush_mmu().
291 */
292int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
293{
294 struct mmu_gather_batch *batch;
295
296 tlb->need_flush = 1;
297
298 if (tlb_fast_mode(tlb)) {
299 free_page_and_swap_cache(page);
300 return 1; /* avoid calling tlb_flush_mmu() */
301 }
302
303 batch = tlb->active;
304 batch->pages[batch->nr++] = page;
305 if (batch->nr == batch->max) {
306 if (!tlb_next_batch(tlb))
307 return 0;
308 }
309 VM_BUG_ON(batch->nr > batch->max);
310
311 return batch->max - batch->nr;
312}
313
314#endif /* HAVE_GENERIC_MMU_GATHER */
315
316#ifdef CONFIG_HAVE_RCU_TABLE_FREE
317
318/*
319 * See the comment near struct mmu_table_batch.
320 */
321
322static void tlb_remove_table_smp_sync(void *arg)
323{
324 /* Simply deliver the interrupt */
325}
326
327static void tlb_remove_table_one(void *table)
328{
329 /*
330 * This isn't an RCU grace period and hence the page-tables cannot be
331 * assumed to be actually RCU-freed.
332 *
333 * It is however sufficient for software page-table walkers that rely on
334 * IRQ disabling. See the comment near struct mmu_table_batch.
335 */
336 smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
337 __tlb_remove_table(table);
338}
339
340static void tlb_remove_table_rcu(struct rcu_head *head)
341{
342 struct mmu_table_batch *batch;
343 int i;
344
345 batch = container_of(head, struct mmu_table_batch, rcu);
346
347 for (i = 0; i < batch->nr; i++)
348 __tlb_remove_table(batch->tables[i]);
349
350 free_page((unsigned long)batch);
351}
352
353void tlb_table_flush(struct mmu_gather *tlb)
354{
355 struct mmu_table_batch **batch = &tlb->batch;
356
357 if (*batch) {
358 call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
359 *batch = NULL;
360 }
361}
362
363void tlb_remove_table(struct mmu_gather *tlb, void *table)
364{
365 struct mmu_table_batch **batch = &tlb->batch;
366
367 tlb->need_flush = 1;
368
369 /*
370 * When there's less then two users of this mm there cannot be a
371 * concurrent page-table walk.
372 */
373 if (atomic_read(&tlb->mm->mm_users) < 2) {
374 __tlb_remove_table(table);
375 return;
376 }
377
378 if (*batch == NULL) {
379 *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
380 if (*batch == NULL) {
381 tlb_remove_table_one(table);
382 return;
383 }
384 (*batch)->nr = 0;
385 }
386 (*batch)->tables[(*batch)->nr++] = table;
387 if ((*batch)->nr == MAX_TABLE_BATCH)
388 tlb_table_flush(tlb);
389}
390
391#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
392
196/* 393/*
197 * If a p?d_bad entry is found while walking page tables, report 394 * If a p?d_bad entry is found while walking page tables, report
198 * the error, before resetting entry to p?d_none. Usually (but 395 * the error, before resetting entry to p?d_none. Usually (but
@@ -909,26 +1106,24 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
909static unsigned long zap_pte_range(struct mmu_gather *tlb, 1106static unsigned long zap_pte_range(struct mmu_gather *tlb,
910 struct vm_area_struct *vma, pmd_t *pmd, 1107 struct vm_area_struct *vma, pmd_t *pmd,
911 unsigned long addr, unsigned long end, 1108 unsigned long addr, unsigned long end,
912 long *zap_work, struct zap_details *details) 1109 struct zap_details *details)
913{ 1110{
914 struct mm_struct *mm = tlb->mm; 1111 struct mm_struct *mm = tlb->mm;
915 pte_t *pte; 1112 int force_flush = 0;
916 spinlock_t *ptl;
917 int rss[NR_MM_COUNTERS]; 1113 int rss[NR_MM_COUNTERS];
1114 spinlock_t *ptl;
1115 pte_t *pte;
918 1116
1117again:
919 init_rss_vec(rss); 1118 init_rss_vec(rss);
920
921 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 1119 pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
922 arch_enter_lazy_mmu_mode(); 1120 arch_enter_lazy_mmu_mode();
923 do { 1121 do {
924 pte_t ptent = *pte; 1122 pte_t ptent = *pte;
925 if (pte_none(ptent)) { 1123 if (pte_none(ptent)) {
926 (*zap_work)--;
927 continue; 1124 continue;
928 } 1125 }
929 1126
930 (*zap_work) -= PAGE_SIZE;
931
932 if (pte_present(ptent)) { 1127 if (pte_present(ptent)) {
933 struct page *page; 1128 struct page *page;
934 1129
@@ -974,7 +1169,9 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
974 page_remove_rmap(page); 1169 page_remove_rmap(page);
975 if (unlikely(page_mapcount(page) < 0)) 1170 if (unlikely(page_mapcount(page) < 0))
976 print_bad_pte(vma, addr, ptent, page); 1171 print_bad_pte(vma, addr, ptent, page);
977 tlb_remove_page(tlb, page); 1172 force_flush = !__tlb_remove_page(tlb, page);
1173 if (force_flush)
1174 break;
978 continue; 1175 continue;
979 } 1176 }
980 /* 1177 /*
@@ -995,19 +1192,31 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
995 print_bad_pte(vma, addr, ptent, NULL); 1192 print_bad_pte(vma, addr, ptent, NULL);
996 } 1193 }
997 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); 1194 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
998 } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0)); 1195 } while (pte++, addr += PAGE_SIZE, addr != end);
999 1196
1000 add_mm_rss_vec(mm, rss); 1197 add_mm_rss_vec(mm, rss);
1001 arch_leave_lazy_mmu_mode(); 1198 arch_leave_lazy_mmu_mode();
1002 pte_unmap_unlock(pte - 1, ptl); 1199 pte_unmap_unlock(pte - 1, ptl);
1003 1200
1201 /*
1202 * mmu_gather ran out of room to batch pages, we break out of
1203 * the PTE lock to avoid doing the potential expensive TLB invalidate
1204 * and page-free while holding it.
1205 */
1206 if (force_flush) {
1207 force_flush = 0;
1208 tlb_flush_mmu(tlb);
1209 if (addr != end)
1210 goto again;
1211 }
1212
1004 return addr; 1213 return addr;
1005} 1214}
1006 1215
1007static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, 1216static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1008 struct vm_area_struct *vma, pud_t *pud, 1217 struct vm_area_struct *vma, pud_t *pud,
1009 unsigned long addr, unsigned long end, 1218 unsigned long addr, unsigned long end,
1010 long *zap_work, struct zap_details *details) 1219 struct zap_details *details)
1011{ 1220{
1012 pmd_t *pmd; 1221 pmd_t *pmd;
1013 unsigned long next; 1222 unsigned long next;
@@ -1019,19 +1228,15 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1019 if (next-addr != HPAGE_PMD_SIZE) { 1228 if (next-addr != HPAGE_PMD_SIZE) {
1020 VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); 1229 VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
1021 split_huge_page_pmd(vma->vm_mm, pmd); 1230 split_huge_page_pmd(vma->vm_mm, pmd);
1022 } else if (zap_huge_pmd(tlb, vma, pmd)) { 1231 } else if (zap_huge_pmd(tlb, vma, pmd))
1023 (*zap_work)--;
1024 continue; 1232 continue;
1025 }
1026 /* fall through */ 1233 /* fall through */
1027 } 1234 }
1028 if (pmd_none_or_clear_bad(pmd)) { 1235 if (pmd_none_or_clear_bad(pmd))
1029 (*zap_work)--;
1030 continue; 1236 continue;
1031 } 1237 next = zap_pte_range(tlb, vma, pmd, addr, next, details);
1032 next = zap_pte_range(tlb, vma, pmd, addr, next, 1238 cond_resched();
1033 zap_work, details); 1239 } while (pmd++, addr = next, addr != end);
1034 } while (pmd++, addr = next, (addr != end && *zap_work > 0));
1035 1240
1036 return addr; 1241 return addr;
1037} 1242}
@@ -1039,7 +1244,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1039static inline unsigned long zap_pud_range(struct mmu_gather *tlb, 1244static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1040 struct vm_area_struct *vma, pgd_t *pgd, 1245 struct vm_area_struct *vma, pgd_t *pgd,
1041 unsigned long addr, unsigned long end, 1246 unsigned long addr, unsigned long end,
1042 long *zap_work, struct zap_details *details) 1247 struct zap_details *details)
1043{ 1248{
1044 pud_t *pud; 1249 pud_t *pud;
1045 unsigned long next; 1250 unsigned long next;
@@ -1047,13 +1252,10 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1047 pud = pud_offset(pgd, addr); 1252 pud = pud_offset(pgd, addr);
1048 do { 1253 do {
1049 next = pud_addr_end(addr, end); 1254 next = pud_addr_end(addr, end);
1050 if (pud_none_or_clear_bad(pud)) { 1255 if (pud_none_or_clear_bad(pud))
1051 (*zap_work)--;
1052 continue; 1256 continue;
1053 } 1257 next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1054 next = zap_pmd_range(tlb, vma, pud, addr, next, 1258 } while (pud++, addr = next, addr != end);
1055 zap_work, details);
1056 } while (pud++, addr = next, (addr != end && *zap_work > 0));
1057 1259
1058 return addr; 1260 return addr;
1059} 1261}
@@ -1061,7 +1263,7 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1061static unsigned long unmap_page_range(struct mmu_gather *tlb, 1263static unsigned long unmap_page_range(struct mmu_gather *tlb,
1062 struct vm_area_struct *vma, 1264 struct vm_area_struct *vma,
1063 unsigned long addr, unsigned long end, 1265 unsigned long addr, unsigned long end,
1064 long *zap_work, struct zap_details *details) 1266 struct zap_details *details)
1065{ 1267{
1066 pgd_t *pgd; 1268 pgd_t *pgd;
1067 unsigned long next; 1269 unsigned long next;
@@ -1075,13 +1277,10 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
1075 pgd = pgd_offset(vma->vm_mm, addr); 1277 pgd = pgd_offset(vma->vm_mm, addr);
1076 do { 1278 do {
1077 next = pgd_addr_end(addr, end); 1279 next = pgd_addr_end(addr, end);
1078 if (pgd_none_or_clear_bad(pgd)) { 1280 if (pgd_none_or_clear_bad(pgd))
1079 (*zap_work)--;
1080 continue; 1281 continue;
1081 } 1282 next = zap_pud_range(tlb, vma, pgd, addr, next, details);
1082 next = zap_pud_range(tlb, vma, pgd, addr, next, 1283 } while (pgd++, addr = next, addr != end);
1083 zap_work, details);
1084 } while (pgd++, addr = next, (addr != end && *zap_work > 0));
1085 tlb_end_vma(tlb, vma); 1284 tlb_end_vma(tlb, vma);
1086 mem_cgroup_uncharge_end(); 1285 mem_cgroup_uncharge_end();
1087 1286
@@ -1121,17 +1320,12 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
1121 * ensure that any thus-far unmapped pages are flushed before unmap_vmas() 1320 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1122 * drops the lock and schedules. 1321 * drops the lock and schedules.
1123 */ 1322 */
1124unsigned long unmap_vmas(struct mmu_gather **tlbp, 1323unsigned long unmap_vmas(struct mmu_gather *tlb,
1125 struct vm_area_struct *vma, unsigned long start_addr, 1324 struct vm_area_struct *vma, unsigned long start_addr,
1126 unsigned long end_addr, unsigned long *nr_accounted, 1325 unsigned long end_addr, unsigned long *nr_accounted,
1127 struct zap_details *details) 1326 struct zap_details *details)
1128{ 1327{
1129 long zap_work = ZAP_BLOCK_SIZE;
1130 unsigned long tlb_start = 0; /* For tlb_finish_mmu */
1131 int tlb_start_valid = 0;
1132 unsigned long start = start_addr; 1328 unsigned long start = start_addr;
1133 spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
1134 int fullmm = (*tlbp)->fullmm;
1135 struct mm_struct *mm = vma->vm_mm; 1329 struct mm_struct *mm = vma->vm_mm;
1136 1330
1137 mmu_notifier_invalidate_range_start(mm, start_addr, end_addr); 1331 mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
@@ -1152,11 +1346,6 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
1152 untrack_pfn_vma(vma, 0, 0); 1346 untrack_pfn_vma(vma, 0, 0);
1153 1347
1154 while (start != end) { 1348 while (start != end) {
1155 if (!tlb_start_valid) {
1156 tlb_start = start;
1157 tlb_start_valid = 1;
1158 }
1159
1160 if (unlikely(is_vm_hugetlb_page(vma))) { 1349 if (unlikely(is_vm_hugetlb_page(vma))) {
1161 /* 1350 /*
1162 * It is undesirable to test vma->vm_file as it 1351 * It is undesirable to test vma->vm_file as it
@@ -1169,39 +1358,15 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
1169 * Since no pte has actually been setup, it is 1358 * Since no pte has actually been setup, it is
1170 * safe to do nothing in this case. 1359 * safe to do nothing in this case.
1171 */ 1360 */
1172 if (vma->vm_file) { 1361 if (vma->vm_file)
1173 unmap_hugepage_range(vma, start, end, NULL); 1362 unmap_hugepage_range(vma, start, end, NULL);
1174 zap_work -= (end - start) /
1175 pages_per_huge_page(hstate_vma(vma));
1176 }
1177 1363
1178 start = end; 1364 start = end;
1179 } else 1365 } else
1180 start = unmap_page_range(*tlbp, vma, 1366 start = unmap_page_range(tlb, vma, start, end, details);
1181 start, end, &zap_work, details);
1182
1183 if (zap_work > 0) {
1184 BUG_ON(start != end);
1185 break;
1186 }
1187
1188 tlb_finish_mmu(*tlbp, tlb_start, start);
1189
1190 if (need_resched() ||
1191 (i_mmap_lock && spin_needbreak(i_mmap_lock))) {
1192 if (i_mmap_lock) {
1193 *tlbp = NULL;
1194 goto out;
1195 }
1196 cond_resched();
1197 }
1198
1199 *tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
1200 tlb_start_valid = 0;
1201 zap_work = ZAP_BLOCK_SIZE;
1202 } 1367 }
1203 } 1368 }
1204out: 1369
1205 mmu_notifier_invalidate_range_end(mm, start_addr, end_addr); 1370 mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
1206 return start; /* which is now the end (or restart) address */ 1371 return start; /* which is now the end (or restart) address */
1207} 1372}
@@ -1217,16 +1382,15 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
1217 unsigned long size, struct zap_details *details) 1382 unsigned long size, struct zap_details *details)
1218{ 1383{
1219 struct mm_struct *mm = vma->vm_mm; 1384 struct mm_struct *mm = vma->vm_mm;
1220 struct mmu_gather *tlb; 1385 struct mmu_gather tlb;
1221 unsigned long end = address + size; 1386 unsigned long end = address + size;
1222 unsigned long nr_accounted = 0; 1387 unsigned long nr_accounted = 0;
1223 1388
1224 lru_add_drain(); 1389 lru_add_drain();
1225 tlb = tlb_gather_mmu(mm, 0); 1390 tlb_gather_mmu(&tlb, mm, 0);
1226 update_hiwater_rss(mm); 1391 update_hiwater_rss(mm);
1227 end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details); 1392 end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
1228 if (tlb) 1393 tlb_finish_mmu(&tlb, address, end);
1229 tlb_finish_mmu(tlb, address, end);
1230 return end; 1394 return end;
1231} 1395}
1232 1396
@@ -2535,96 +2699,11 @@ unwritable_page:
2535 return ret; 2699 return ret;
2536} 2700}
2537 2701
2538/* 2702static void unmap_mapping_range_vma(struct vm_area_struct *vma,
2539 * Helper functions for unmap_mapping_range().
2540 *
2541 * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
2542 *
2543 * We have to restart searching the prio_tree whenever we drop the lock,
2544 * since the iterator is only valid while the lock is held, and anyway
2545 * a later vma might be split and reinserted earlier while lock dropped.
2546 *
2547 * The list of nonlinear vmas could be handled more efficiently, using
2548 * a placeholder, but handle it in the same way until a need is shown.
2549 * It is important to search the prio_tree before nonlinear list: a vma
2550 * may become nonlinear and be shifted from prio_tree to nonlinear list
2551 * while the lock is dropped; but never shifted from list to prio_tree.
2552 *
2553 * In order to make forward progress despite restarting the search,
2554 * vm_truncate_count is used to mark a vma as now dealt with, so we can
2555 * quickly skip it next time around. Since the prio_tree search only
2556 * shows us those vmas affected by unmapping the range in question, we
2557 * can't efficiently keep all vmas in step with mapping->truncate_count:
2558 * so instead reset them all whenever it wraps back to 0 (then go to 1).
2559 * mapping->truncate_count and vma->vm_truncate_count are protected by
2560 * i_mmap_lock.
2561 *
2562 * In order to make forward progress despite repeatedly restarting some
2563 * large vma, note the restart_addr from unmap_vmas when it breaks out:
2564 * and restart from that address when we reach that vma again. It might
2565 * have been split or merged, shrunk or extended, but never shifted: so
2566 * restart_addr remains valid so long as it remains in the vma's range.
2567 * unmap_mapping_range forces truncate_count to leap over page-aligned
2568 * values so we can save vma's restart_addr in its truncate_count field.
2569 */
2570#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
2571
2572static void reset_vma_truncate_counts(struct address_space *mapping)
2573{
2574 struct vm_area_struct *vma;
2575 struct prio_tree_iter iter;
2576
2577 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
2578 vma->vm_truncate_count = 0;
2579 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
2580 vma->vm_truncate_count = 0;
2581}
2582
2583static int unmap_mapping_range_vma(struct vm_area_struct *vma,
2584 unsigned long start_addr, unsigned long end_addr, 2703 unsigned long start_addr, unsigned long end_addr,
2585 struct zap_details *details) 2704 struct zap_details *details)
2586{ 2705{
2587 unsigned long restart_addr; 2706 zap_page_range(vma, start_addr, end_addr - start_addr, details);
2588 int need_break;
2589
2590 /*
2591 * files that support invalidating or truncating portions of the
2592 * file from under mmaped areas must have their ->fault function
2593 * return a locked page (and set VM_FAULT_LOCKED in the return).
2594 * This provides synchronisation against concurrent unmapping here.
2595 */
2596
2597again:
2598 restart_addr = vma->vm_truncate_count;
2599 if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
2600 start_addr = restart_addr;
2601 if (start_addr >= end_addr) {
2602 /* Top of vma has been split off since last time */
2603 vma->vm_truncate_count = details->truncate_count;
2604 return 0;
2605 }
2606 }
2607
2608 restart_addr = zap_page_range(vma, start_addr,
2609 end_addr - start_addr, details);
2610 need_break = need_resched() || spin_needbreak(details->i_mmap_lock);
2611
2612 if (restart_addr >= end_addr) {
2613 /* We have now completed this vma: mark it so */
2614 vma->vm_truncate_count = details->truncate_count;
2615 if (!need_break)
2616 return 0;
2617 } else {
2618 /* Note restart_addr in vma's truncate_count field */
2619 vma->vm_truncate_count = restart_addr;
2620 if (!need_break)
2621 goto again;
2622 }
2623
2624 spin_unlock(details->i_mmap_lock);
2625 cond_resched();
2626 spin_lock(details->i_mmap_lock);
2627 return -EINTR;
2628} 2707}
2629 2708
2630static inline void unmap_mapping_range_tree(struct prio_tree_root *root, 2709static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
@@ -2634,12 +2713,8 @@ static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
2634 struct prio_tree_iter iter; 2713 struct prio_tree_iter iter;
2635 pgoff_t vba, vea, zba, zea; 2714 pgoff_t vba, vea, zba, zea;
2636 2715
2637restart:
2638 vma_prio_tree_foreach(vma, &iter, root, 2716 vma_prio_tree_foreach(vma, &iter, root,
2639 details->first_index, details->last_index) { 2717 details->first_index, details->last_index) {
2640 /* Skip quickly over those we have already dealt with */
2641 if (vma->vm_truncate_count == details->truncate_count)
2642 continue;
2643 2718
2644 vba = vma->vm_pgoff; 2719 vba = vma->vm_pgoff;
2645 vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1; 2720 vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
@@ -2651,11 +2726,10 @@ restart:
2651 if (zea > vea) 2726 if (zea > vea)
2652 zea = vea; 2727 zea = vea;
2653 2728
2654 if (unmap_mapping_range_vma(vma, 2729 unmap_mapping_range_vma(vma,
2655 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, 2730 ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
2656 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, 2731 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
2657 details) < 0) 2732 details);
2658 goto restart;
2659 } 2733 }
2660} 2734}
2661 2735
@@ -2670,15 +2744,9 @@ static inline void unmap_mapping_range_list(struct list_head *head,
2670 * across *all* the pages in each nonlinear VMA, not just the pages 2744 * across *all* the pages in each nonlinear VMA, not just the pages
2671 * whose virtual address lies outside the file truncation point. 2745 * whose virtual address lies outside the file truncation point.
2672 */ 2746 */
2673restart:
2674 list_for_each_entry(vma, head, shared.vm_set.list) { 2747 list_for_each_entry(vma, head, shared.vm_set.list) {
2675 /* Skip quickly over those we have already dealt with */
2676 if (vma->vm_truncate_count == details->truncate_count)
2677 continue;
2678 details->nonlinear_vma = vma; 2748 details->nonlinear_vma = vma;
2679 if (unmap_mapping_range_vma(vma, vma->vm_start, 2749 unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end, details);
2680 vma->vm_end, details) < 0)
2681 goto restart;
2682 } 2750 }
2683} 2751}
2684 2752
@@ -2717,26 +2785,14 @@ void unmap_mapping_range(struct address_space *mapping,
2717 details.last_index = hba + hlen - 1; 2785 details.last_index = hba + hlen - 1;
2718 if (details.last_index < details.first_index) 2786 if (details.last_index < details.first_index)
2719 details.last_index = ULONG_MAX; 2787 details.last_index = ULONG_MAX;
2720 details.i_mmap_lock = &mapping->i_mmap_lock;
2721 2788
2722 mutex_lock(&mapping->unmap_mutex);
2723 spin_lock(&mapping->i_mmap_lock);
2724
2725 /* Protect against endless unmapping loops */
2726 mapping->truncate_count++;
2727 if (unlikely(is_restart_addr(mapping->truncate_count))) {
2728 if (mapping->truncate_count == 0)
2729 reset_vma_truncate_counts(mapping);
2730 mapping->truncate_count++;
2731 }
2732 details.truncate_count = mapping->truncate_count;
2733 2789
2790 mutex_lock(&mapping->i_mmap_mutex);
2734 if (unlikely(!prio_tree_empty(&mapping->i_mmap))) 2791 if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
2735 unmap_mapping_range_tree(&mapping->i_mmap, &details); 2792 unmap_mapping_range_tree(&mapping->i_mmap, &details);
2736 if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) 2793 if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
2737 unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); 2794 unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
2738 spin_unlock(&mapping->i_mmap_lock); 2795 mutex_unlock(&mapping->i_mmap_mutex);
2739 mutex_unlock(&mapping->unmap_mutex);
2740} 2796}
2741EXPORT_SYMBOL(unmap_mapping_range); 2797EXPORT_SYMBOL(unmap_mapping_range);
2742 2798
@@ -2966,7 +3022,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
2966 if (prev && prev->vm_end == address) 3022 if (prev && prev->vm_end == address)
2967 return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; 3023 return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
2968 3024
2969 expand_stack(vma, address - PAGE_SIZE); 3025 expand_downwards(vma, address - PAGE_SIZE);
2970 } 3026 }
2971 if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { 3027 if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
2972 struct vm_area_struct *next = vma->vm_next; 3028 struct vm_area_struct *next = vma->vm_next;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9ca1d604f7cd..9f646374e32f 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -374,10 +374,6 @@ void online_page(struct page *page)
374 totalhigh_pages++; 374 totalhigh_pages++;
375#endif 375#endif
376 376
377#ifdef CONFIG_FLATMEM
378 max_mapnr = max(pfn, max_mapnr);
379#endif
380
381 ClearPageReserved(page); 377 ClearPageReserved(page);
382 init_page_count(page); 378 init_page_count(page);
383 __free_page(page); 379 __free_page(page);
@@ -400,7 +396,7 @@ static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
400} 396}
401 397
402 398
403int online_pages(unsigned long pfn, unsigned long nr_pages) 399int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
404{ 400{
405 unsigned long onlined_pages = 0; 401 unsigned long onlined_pages = 0;
406 struct zone *zone; 402 struct zone *zone;
@@ -459,8 +455,9 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
459 zone_pcp_update(zone); 455 zone_pcp_update(zone);
460 456
461 mutex_unlock(&zonelists_mutex); 457 mutex_unlock(&zonelists_mutex);
462 setup_per_zone_wmarks(); 458
463 calculate_zone_inactive_ratio(zone); 459 init_per_zone_wmark_min();
460
464 if (onlined_pages) { 461 if (onlined_pages) {
465 kswapd_run(zone_to_nid(zone)); 462 kswapd_run(zone_to_nid(zone));
466 node_set_state(zone_to_nid(zone), N_HIGH_MEMORY); 463 node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
@@ -705,7 +702,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
705 if (!pfn_valid(pfn)) 702 if (!pfn_valid(pfn))
706 continue; 703 continue;
707 page = pfn_to_page(pfn); 704 page = pfn_to_page(pfn);
708 if (!page_count(page)) 705 if (!get_page_unless_zero(page))
709 continue; 706 continue;
710 /* 707 /*
711 * We can skip free pages. And we can only deal with pages on 708 * We can skip free pages. And we can only deal with pages on
@@ -713,6 +710,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
713 */ 710 */
714 ret = isolate_lru_page(page); 711 ret = isolate_lru_page(page);
715 if (!ret) { /* Success */ 712 if (!ret) { /* Success */
713 put_page(page);
716 list_add_tail(&page->lru, &source); 714 list_add_tail(&page->lru, &source);
717 move_pages--; 715 move_pages--;
718 inc_zone_page_state(page, NR_ISOLATED_ANON + 716 inc_zone_page_state(page, NR_ISOLATED_ANON +
@@ -724,6 +722,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
724 pfn); 722 pfn);
725 dump_page(page); 723 dump_page(page);
726#endif 724#endif
725 put_page(page);
727 /* Because we don't have big zone->lock. we should 726 /* Because we don't have big zone->lock. we should
728 check this again here. */ 727 check this again here. */
729 if (page_count(page)) { 728 if (page_count(page)) {
@@ -795,7 +794,7 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
795 return offlined; 794 return offlined;
796} 795}
797 796
798static int offline_pages(unsigned long start_pfn, 797static int __ref offline_pages(unsigned long start_pfn,
799 unsigned long end_pfn, unsigned long timeout) 798 unsigned long end_pfn, unsigned long timeout)
800{ 799{
801 unsigned long pfn, nr_pages, expire; 800 unsigned long pfn, nr_pages, expire;
@@ -893,8 +892,8 @@ repeat:
893 zone->zone_pgdat->node_present_pages -= offlined_pages; 892 zone->zone_pgdat->node_present_pages -= offlined_pages;
894 totalram_pages -= offlined_pages; 893 totalram_pages -= offlined_pages;
895 894
896 setup_per_zone_wmarks(); 895 init_per_zone_wmark_min();
897 calculate_zone_inactive_ratio(zone); 896
898 if (!node_present_pages(node)) { 897 if (!node_present_pages(node)) {
899 node_clear_state(node, N_HIGH_MEMORY); 898 node_clear_state(node, N_HIGH_MEMORY);
900 kswapd_stop(node); 899 kswapd_stop(node);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 959a8b8c7350..e7fb9d25c54e 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -99,7 +99,6 @@
99/* Internal flags */ 99/* Internal flags */
100#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */ 100#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
101#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */ 101#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
102#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2) /* Gather statistics */
103 102
104static struct kmem_cache *policy_cache; 103static struct kmem_cache *policy_cache;
105static struct kmem_cache *sn_cache; 104static struct kmem_cache *sn_cache;
@@ -457,7 +456,6 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
457 }, 456 },
458}; 457};
459 458
460static void gather_stats(struct page *, void *, int pte_dirty);
461static void migrate_page_add(struct page *page, struct list_head *pagelist, 459static void migrate_page_add(struct page *page, struct list_head *pagelist,
462 unsigned long flags); 460 unsigned long flags);
463 461
@@ -492,9 +490,7 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
492 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT)) 490 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
493 continue; 491 continue;
494 492
495 if (flags & MPOL_MF_STATS) 493 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
496 gather_stats(page, private, pte_dirty(*pte));
497 else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
498 migrate_page_add(page, private, flags); 494 migrate_page_add(page, private, flags);
499 else 495 else
500 break; 496 break;
@@ -1489,7 +1485,7 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1489 * freeing by another task. It is the caller's responsibility to free the 1485 * freeing by another task. It is the caller's responsibility to free the
1490 * extra reference for shared policies. 1486 * extra reference for shared policies.
1491 */ 1487 */
1492static struct mempolicy *get_vma_policy(struct task_struct *task, 1488struct mempolicy *get_vma_policy(struct task_struct *task,
1493 struct vm_area_struct *vma, unsigned long addr) 1489 struct vm_area_struct *vma, unsigned long addr)
1494{ 1490{
1495 struct mempolicy *pol = task->mempolicy; 1491 struct mempolicy *pol = task->mempolicy;
@@ -2529,159 +2525,3 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
2529 } 2525 }
2530 return p - buffer; 2526 return p - buffer;
2531} 2527}
2532
2533struct numa_maps {
2534 unsigned long pages;
2535 unsigned long anon;
2536 unsigned long active;
2537 unsigned long writeback;
2538 unsigned long mapcount_max;
2539 unsigned long dirty;
2540 unsigned long swapcache;
2541 unsigned long node[MAX_NUMNODES];
2542};
2543
2544static void gather_stats(struct page *page, void *private, int pte_dirty)
2545{
2546 struct numa_maps *md = private;
2547 int count = page_mapcount(page);
2548
2549 md->pages++;
2550 if (pte_dirty || PageDirty(page))
2551 md->dirty++;
2552
2553 if (PageSwapCache(page))
2554 md->swapcache++;
2555
2556 if (PageActive(page) || PageUnevictable(page))
2557 md->active++;
2558
2559 if (PageWriteback(page))
2560 md->writeback++;
2561
2562 if (PageAnon(page))
2563 md->anon++;
2564
2565 if (count > md->mapcount_max)
2566 md->mapcount_max = count;
2567
2568 md->node[page_to_nid(page)]++;
2569}
2570
2571#ifdef CONFIG_HUGETLB_PAGE
2572static void check_huge_range(struct vm_area_struct *vma,
2573 unsigned long start, unsigned long end,
2574 struct numa_maps *md)
2575{
2576 unsigned long addr;
2577 struct page *page;
2578 struct hstate *h = hstate_vma(vma);
2579 unsigned long sz = huge_page_size(h);
2580
2581 for (addr = start; addr < end; addr += sz) {
2582 pte_t *ptep = huge_pte_offset(vma->vm_mm,
2583 addr & huge_page_mask(h));
2584 pte_t pte;
2585
2586 if (!ptep)
2587 continue;
2588
2589 pte = *ptep;
2590 if (pte_none(pte))
2591 continue;
2592
2593 page = pte_page(pte);
2594 if (!page)
2595 continue;
2596
2597 gather_stats(page, md, pte_dirty(*ptep));
2598 }
2599}
2600#else
2601static inline void check_huge_range(struct vm_area_struct *vma,
2602 unsigned long start, unsigned long end,
2603 struct numa_maps *md)
2604{
2605}
2606#endif
2607
2608/*
2609 * Display pages allocated per node and memory policy via /proc.
2610 */
2611int show_numa_map(struct seq_file *m, void *v)
2612{
2613 struct proc_maps_private *priv = m->private;
2614 struct vm_area_struct *vma = v;
2615 struct numa_maps *md;
2616 struct file *file = vma->vm_file;
2617 struct mm_struct *mm = vma->vm_mm;
2618 struct mempolicy *pol;
2619 int n;
2620 char buffer[50];
2621
2622 if (!mm)
2623 return 0;
2624
2625 md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
2626 if (!md)
2627 return 0;
2628
2629 pol = get_vma_policy(priv->task, vma, vma->vm_start);
2630 mpol_to_str(buffer, sizeof(buffer), pol, 0);
2631 mpol_cond_put(pol);
2632
2633 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
2634
2635 if (file) {
2636 seq_printf(m, " file=");
2637 seq_path(m, &file->f_path, "\n\t= ");
2638 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
2639 seq_printf(m, " heap");
2640 } else if (vma->vm_start <= mm->start_stack &&
2641 vma->vm_end >= mm->start_stack) {
2642 seq_printf(m, " stack");
2643 }
2644
2645 if (is_vm_hugetlb_page(vma)) {
2646 check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2647 seq_printf(m, " huge");
2648 } else {
2649 check_pgd_range(vma, vma->vm_start, vma->vm_end,
2650 &node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
2651 }
2652
2653 if (!md->pages)
2654 goto out;
2655
2656 if (md->anon)
2657 seq_printf(m," anon=%lu",md->anon);
2658
2659 if (md->dirty)
2660 seq_printf(m," dirty=%lu",md->dirty);
2661
2662 if (md->pages != md->anon && md->pages != md->dirty)
2663 seq_printf(m, " mapped=%lu", md->pages);
2664
2665 if (md->mapcount_max > 1)
2666 seq_printf(m, " mapmax=%lu", md->mapcount_max);
2667
2668 if (md->swapcache)
2669 seq_printf(m," swapcache=%lu", md->swapcache);
2670
2671 if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2672 seq_printf(m," active=%lu", md->active);
2673
2674 if (md->writeback)
2675 seq_printf(m," writeback=%lu", md->writeback);
2676
2677 for_each_node_state(n, N_HIGH_MEMORY)
2678 if (md->node[n])
2679 seq_printf(m, " N%d=%lu", n, md->node[n]);
2680out:
2681 seq_putc(m, '\n');
2682 kfree(md);
2683
2684 if (m->count < m->size)
2685 m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
2686 return 0;
2687}
diff --git a/mm/migrate.c b/mm/migrate.c
index 34132f8e9109..e4a5c912983d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -721,15 +721,11 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
721 * Only page_lock_anon_vma() understands the subtleties of 721 * Only page_lock_anon_vma() understands the subtleties of
722 * getting a hold on an anon_vma from outside one of its mms. 722 * getting a hold on an anon_vma from outside one of its mms.
723 */ 723 */
724 anon_vma = page_lock_anon_vma(page); 724 anon_vma = page_get_anon_vma(page);
725 if (anon_vma) { 725 if (anon_vma) {
726 /* 726 /*
727 * Take a reference count on the anon_vma if the 727 * Anon page
728 * page is mapped so that it is guaranteed to
729 * exist when the page is remapped later
730 */ 728 */
731 get_anon_vma(anon_vma);
732 page_unlock_anon_vma(anon_vma);
733 } else if (PageSwapCache(page)) { 729 } else if (PageSwapCache(page)) {
734 /* 730 /*
735 * We cannot be sure that the anon_vma of an unmapped 731 * We cannot be sure that the anon_vma of an unmapped
@@ -857,13 +853,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
857 lock_page(hpage); 853 lock_page(hpage);
858 } 854 }
859 855
860 if (PageAnon(hpage)) { 856 if (PageAnon(hpage))
861 anon_vma = page_lock_anon_vma(hpage); 857 anon_vma = page_get_anon_vma(hpage);
862 if (anon_vma) {
863 get_anon_vma(anon_vma);
864 page_unlock_anon_vma(anon_vma);
865 }
866 }
867 858
868 try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); 859 try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
869 860
diff --git a/mm/mmap.c b/mm/mmap.c
index 772140c53ab1..ac2631b7477f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -84,10 +84,14 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
84} 84}
85EXPORT_SYMBOL(vm_get_page_prot); 85EXPORT_SYMBOL(vm_get_page_prot);
86 86
87int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ 87int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
88int sysctl_overcommit_ratio = 50; /* default is 50% */ 88int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
89int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; 89int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
90struct percpu_counter vm_committed_as; 90/*
91 * Make sure vm_committed_as in one cacheline and not cacheline shared with
92 * other variables. It can be updated by several CPUs frequently.
93 */
94struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
91 95
92/* 96/*
93 * Check that a process has enough memory to allocate a new virtual 97 * Check that a process has enough memory to allocate a new virtual
@@ -190,7 +194,7 @@ error:
190} 194}
191 195
192/* 196/*
193 * Requires inode->i_mapping->i_mmap_lock 197 * Requires inode->i_mapping->i_mmap_mutex
194 */ 198 */
195static void __remove_shared_vm_struct(struct vm_area_struct *vma, 199static void __remove_shared_vm_struct(struct vm_area_struct *vma,
196 struct file *file, struct address_space *mapping) 200 struct file *file, struct address_space *mapping)
@@ -218,9 +222,9 @@ void unlink_file_vma(struct vm_area_struct *vma)
218 222
219 if (file) { 223 if (file) {
220 struct address_space *mapping = file->f_mapping; 224 struct address_space *mapping = file->f_mapping;
221 spin_lock(&mapping->i_mmap_lock); 225 mutex_lock(&mapping->i_mmap_mutex);
222 __remove_shared_vm_struct(vma, file, mapping); 226 __remove_shared_vm_struct(vma, file, mapping);
223 spin_unlock(&mapping->i_mmap_lock); 227 mutex_unlock(&mapping->i_mmap_mutex);
224 } 228 }
225} 229}
226 230
@@ -394,29 +398,6 @@ find_vma_prepare(struct mm_struct *mm, unsigned long addr,
394 return vma; 398 return vma;
395} 399}
396 400
397static inline void
398__vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
399 struct vm_area_struct *prev, struct rb_node *rb_parent)
400{
401 struct vm_area_struct *next;
402
403 vma->vm_prev = prev;
404 if (prev) {
405 next = prev->vm_next;
406 prev->vm_next = vma;
407 } else {
408 mm->mmap = vma;
409 if (rb_parent)
410 next = rb_entry(rb_parent,
411 struct vm_area_struct, vm_rb);
412 else
413 next = NULL;
414 }
415 vma->vm_next = next;
416 if (next)
417 next->vm_prev = vma;
418}
419
420void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, 401void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
421 struct rb_node **rb_link, struct rb_node *rb_parent) 402 struct rb_node **rb_link, struct rb_node *rb_parent)
422{ 403{
@@ -464,16 +445,14 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
464 if (vma->vm_file) 445 if (vma->vm_file)
465 mapping = vma->vm_file->f_mapping; 446 mapping = vma->vm_file->f_mapping;
466 447
467 if (mapping) { 448 if (mapping)
468 spin_lock(&mapping->i_mmap_lock); 449 mutex_lock(&mapping->i_mmap_mutex);
469 vma->vm_truncate_count = mapping->truncate_count;
470 }
471 450
472 __vma_link(mm, vma, prev, rb_link, rb_parent); 451 __vma_link(mm, vma, prev, rb_link, rb_parent);
473 __vma_link_file(vma); 452 __vma_link_file(vma);
474 453
475 if (mapping) 454 if (mapping)
476 spin_unlock(&mapping->i_mmap_lock); 455 mutex_unlock(&mapping->i_mmap_mutex);
477 456
478 mm->map_count++; 457 mm->map_count++;
479 validate_mm(mm); 458 validate_mm(mm);
@@ -576,17 +555,8 @@ again: remove_next = 1 + (end > next->vm_end);
576 mapping = file->f_mapping; 555 mapping = file->f_mapping;
577 if (!(vma->vm_flags & VM_NONLINEAR)) 556 if (!(vma->vm_flags & VM_NONLINEAR))
578 root = &mapping->i_mmap; 557 root = &mapping->i_mmap;
579 spin_lock(&mapping->i_mmap_lock); 558 mutex_lock(&mapping->i_mmap_mutex);
580 if (importer &&
581 vma->vm_truncate_count != next->vm_truncate_count) {
582 /*
583 * unmap_mapping_range might be in progress:
584 * ensure that the expanding vma is rescanned.
585 */
586 importer->vm_truncate_count = 0;
587 }
588 if (insert) { 559 if (insert) {
589 insert->vm_truncate_count = vma->vm_truncate_count;
590 /* 560 /*
591 * Put into prio_tree now, so instantiated pages 561 * Put into prio_tree now, so instantiated pages
592 * are visible to arm/parisc __flush_dcache_page 562 * are visible to arm/parisc __flush_dcache_page
@@ -605,7 +575,7 @@ again: remove_next = 1 + (end > next->vm_end);
605 * lock may be shared between many sibling processes. Skipping 575 * lock may be shared between many sibling processes. Skipping
606 * the lock for brk adjustments makes a difference sometimes. 576 * the lock for brk adjustments makes a difference sometimes.
607 */ 577 */
608 if (vma->anon_vma && (insert || importer || start != vma->vm_start)) { 578 if (vma->anon_vma && (importer || start != vma->vm_start)) {
609 anon_vma = vma->anon_vma; 579 anon_vma = vma->anon_vma;
610 anon_vma_lock(anon_vma); 580 anon_vma_lock(anon_vma);
611 } 581 }
@@ -652,7 +622,7 @@ again: remove_next = 1 + (end > next->vm_end);
652 if (anon_vma) 622 if (anon_vma)
653 anon_vma_unlock(anon_vma); 623 anon_vma_unlock(anon_vma);
654 if (mapping) 624 if (mapping)
655 spin_unlock(&mapping->i_mmap_lock); 625 mutex_unlock(&mapping->i_mmap_mutex);
656 626
657 if (remove_next) { 627 if (remove_next) {
658 if (file) { 628 if (file) {
@@ -699,9 +669,17 @@ static inline int is_mergeable_vma(struct vm_area_struct *vma,
699} 669}
700 670
701static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1, 671static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
702 struct anon_vma *anon_vma2) 672 struct anon_vma *anon_vma2,
673 struct vm_area_struct *vma)
703{ 674{
704 return !anon_vma1 || !anon_vma2 || (anon_vma1 == anon_vma2); 675 /*
676 * The list_is_singular() test is to avoid merging VMA cloned from
677 * parents. This can improve scalability caused by anon_vma lock.
678 */
679 if ((!anon_vma1 || !anon_vma2) && (!vma ||
680 list_is_singular(&vma->anon_vma_chain)))
681 return 1;
682 return anon_vma1 == anon_vma2;
705} 683}
706 684
707/* 685/*
@@ -720,7 +698,7 @@ can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
720 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) 698 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
721{ 699{
722 if (is_mergeable_vma(vma, file, vm_flags) && 700 if (is_mergeable_vma(vma, file, vm_flags) &&
723 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) { 701 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
724 if (vma->vm_pgoff == vm_pgoff) 702 if (vma->vm_pgoff == vm_pgoff)
725 return 1; 703 return 1;
726 } 704 }
@@ -739,7 +717,7 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
739 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) 717 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
740{ 718{
741 if (is_mergeable_vma(vma, file, vm_flags) && 719 if (is_mergeable_vma(vma, file, vm_flags) &&
742 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) { 720 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
743 pgoff_t vm_pglen; 721 pgoff_t vm_pglen;
744 vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 722 vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
745 if (vma->vm_pgoff + vm_pglen == vm_pgoff) 723 if (vma->vm_pgoff + vm_pglen == vm_pgoff)
@@ -817,7 +795,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
817 can_vma_merge_before(next, vm_flags, 795 can_vma_merge_before(next, vm_flags,
818 anon_vma, file, pgoff+pglen) && 796 anon_vma, file, pgoff+pglen) &&
819 is_mergeable_anon_vma(prev->anon_vma, 797 is_mergeable_anon_vma(prev->anon_vma,
820 next->anon_vma)) { 798 next->anon_vma, NULL)) {
821 /* cases 1, 6 */ 799 /* cases 1, 6 */
822 err = vma_adjust(prev, prev->vm_start, 800 err = vma_adjust(prev, prev->vm_start,
823 next->vm_end, prev->vm_pgoff, NULL); 801 next->vm_end, prev->vm_pgoff, NULL);
@@ -1785,7 +1763,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1785/* 1763/*
1786 * vma is the first one with address < vma->vm_start. Have to extend vma. 1764 * vma is the first one with address < vma->vm_start. Have to extend vma.
1787 */ 1765 */
1788static int expand_downwards(struct vm_area_struct *vma, 1766int expand_downwards(struct vm_area_struct *vma,
1789 unsigned long address) 1767 unsigned long address)
1790{ 1768{
1791 int error; 1769 int error;
@@ -1832,11 +1810,6 @@ static int expand_downwards(struct vm_area_struct *vma,
1832 return error; 1810 return error;
1833} 1811}
1834 1812
1835int expand_stack_downwards(struct vm_area_struct *vma, unsigned long address)
1836{
1837 return expand_downwards(vma, address);
1838}
1839
1840#ifdef CONFIG_STACK_GROWSUP 1813#ifdef CONFIG_STACK_GROWSUP
1841int expand_stack(struct vm_area_struct *vma, unsigned long address) 1814int expand_stack(struct vm_area_struct *vma, unsigned long address)
1842{ 1815{
@@ -1919,17 +1892,17 @@ static void unmap_region(struct mm_struct *mm,
1919 unsigned long start, unsigned long end) 1892 unsigned long start, unsigned long end)
1920{ 1893{
1921 struct vm_area_struct *next = prev? prev->vm_next: mm->mmap; 1894 struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
1922 struct mmu_gather *tlb; 1895 struct mmu_gather tlb;
1923 unsigned long nr_accounted = 0; 1896 unsigned long nr_accounted = 0;
1924 1897
1925 lru_add_drain(); 1898 lru_add_drain();
1926 tlb = tlb_gather_mmu(mm, 0); 1899 tlb_gather_mmu(&tlb, mm, 0);
1927 update_hiwater_rss(mm); 1900 update_hiwater_rss(mm);
1928 unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL); 1901 unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
1929 vm_unacct_memory(nr_accounted); 1902 vm_unacct_memory(nr_accounted);
1930 free_pgtables(tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS, 1903 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
1931 next? next->vm_start: 0); 1904 next ? next->vm_start : 0);
1932 tlb_finish_mmu(tlb, start, end); 1905 tlb_finish_mmu(&tlb, start, end);
1933} 1906}
1934 1907
1935/* 1908/*
@@ -2271,7 +2244,7 @@ EXPORT_SYMBOL(do_brk);
2271/* Release all mmaps. */ 2244/* Release all mmaps. */
2272void exit_mmap(struct mm_struct *mm) 2245void exit_mmap(struct mm_struct *mm)
2273{ 2246{
2274 struct mmu_gather *tlb; 2247 struct mmu_gather tlb;
2275 struct vm_area_struct *vma; 2248 struct vm_area_struct *vma;
2276 unsigned long nr_accounted = 0; 2249 unsigned long nr_accounted = 0;
2277 unsigned long end; 2250 unsigned long end;
@@ -2296,14 +2269,14 @@ void exit_mmap(struct mm_struct *mm)
2296 2269
2297 lru_add_drain(); 2270 lru_add_drain();
2298 flush_cache_mm(mm); 2271 flush_cache_mm(mm);
2299 tlb = tlb_gather_mmu(mm, 1); 2272 tlb_gather_mmu(&tlb, mm, 1);
2300 /* update_hiwater_rss(mm) here? but nobody should be looking */ 2273 /* update_hiwater_rss(mm) here? but nobody should be looking */
2301 /* Use -1 here to ensure all VMAs in the mm are unmapped */ 2274 /* Use -1 here to ensure all VMAs in the mm are unmapped */
2302 end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL); 2275 end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
2303 vm_unacct_memory(nr_accounted); 2276 vm_unacct_memory(nr_accounted);
2304 2277
2305 free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0); 2278 free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
2306 tlb_finish_mmu(tlb, 0, end); 2279 tlb_finish_mmu(&tlb, 0, end);
2307 2280
2308 /* 2281 /*
2309 * Walk the list again, actually closing and freeing it, 2282 * Walk the list again, actually closing and freeing it,
@@ -2317,7 +2290,7 @@ void exit_mmap(struct mm_struct *mm)
2317 2290
2318/* Insert vm structure into process list sorted by address 2291/* Insert vm structure into process list sorted by address
2319 * and into the inode's i_mmap tree. If vm_file is non-NULL 2292 * and into the inode's i_mmap tree. If vm_file is non-NULL
2320 * then i_mmap_lock is taken here. 2293 * then i_mmap_mutex is taken here.
2321 */ 2294 */
2322int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) 2295int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
2323{ 2296{
@@ -2529,15 +2502,15 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
2529 * The LSB of head.next can't change from under us 2502 * The LSB of head.next can't change from under us
2530 * because we hold the mm_all_locks_mutex. 2503 * because we hold the mm_all_locks_mutex.
2531 */ 2504 */
2532 spin_lock_nest_lock(&anon_vma->root->lock, &mm->mmap_sem); 2505 mutex_lock_nest_lock(&anon_vma->root->mutex, &mm->mmap_sem);
2533 /* 2506 /*
2534 * We can safely modify head.next after taking the 2507 * We can safely modify head.next after taking the
2535 * anon_vma->root->lock. If some other vma in this mm shares 2508 * anon_vma->root->mutex. If some other vma in this mm shares
2536 * the same anon_vma we won't take it again. 2509 * the same anon_vma we won't take it again.
2537 * 2510 *
2538 * No need of atomic instructions here, head.next 2511 * No need of atomic instructions here, head.next
2539 * can't change from under us thanks to the 2512 * can't change from under us thanks to the
2540 * anon_vma->root->lock. 2513 * anon_vma->root->mutex.
2541 */ 2514 */
2542 if (__test_and_set_bit(0, (unsigned long *) 2515 if (__test_and_set_bit(0, (unsigned long *)
2543 &anon_vma->root->head.next)) 2516 &anon_vma->root->head.next))
@@ -2559,7 +2532,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
2559 */ 2532 */
2560 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) 2533 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
2561 BUG(); 2534 BUG();
2562 spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem); 2535 mutex_lock_nest_lock(&mapping->i_mmap_mutex, &mm->mmap_sem);
2563 } 2536 }
2564} 2537}
2565 2538
@@ -2586,7 +2559,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
2586 * vma in this mm is backed by the same anon_vma or address_space. 2559 * vma in this mm is backed by the same anon_vma or address_space.
2587 * 2560 *
2588 * We can take all the locks in random order because the VM code 2561 * We can take all the locks in random order because the VM code
2589 * taking i_mmap_lock or anon_vma->lock outside the mmap_sem never 2562 * taking i_mmap_mutex or anon_vma->mutex outside the mmap_sem never
2590 * takes more than one of them in a row. Secondly we're protected 2563 * takes more than one of them in a row. Secondly we're protected
2591 * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex. 2564 * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
2592 * 2565 *
@@ -2642,7 +2615,7 @@ static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
2642 * 2615 *
2643 * No need of atomic instructions here, head.next 2616 * No need of atomic instructions here, head.next
2644 * can't change from under us until we release the 2617 * can't change from under us until we release the
2645 * anon_vma->root->lock. 2618 * anon_vma->root->mutex.
2646 */ 2619 */
2647 if (!__test_and_clear_bit(0, (unsigned long *) 2620 if (!__test_and_clear_bit(0, (unsigned long *)
2648 &anon_vma->root->head.next)) 2621 &anon_vma->root->head.next))
@@ -2658,7 +2631,7 @@ static void vm_unlock_mapping(struct address_space *mapping)
2658 * AS_MM_ALL_LOCKS can't change to 0 from under us 2631 * AS_MM_ALL_LOCKS can't change to 0 from under us
2659 * because we hold the mm_all_locks_mutex. 2632 * because we hold the mm_all_locks_mutex.
2660 */ 2633 */
2661 spin_unlock(&mapping->i_mmap_lock); 2634 mutex_unlock(&mapping->i_mmap_mutex);
2662 if (!test_and_clear_bit(AS_MM_ALL_LOCKS, 2635 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
2663 &mapping->flags)) 2636 &mapping->flags))
2664 BUG(); 2637 BUG();
diff --git a/mm/mremap.c b/mm/mremap.c
index a7c1f9f9b941..506fa44403df 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -93,8 +93,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
93 * and we propagate stale pages into the dst afterward. 93 * and we propagate stale pages into the dst afterward.
94 */ 94 */
95 mapping = vma->vm_file->f_mapping; 95 mapping = vma->vm_file->f_mapping;
96 spin_lock(&mapping->i_mmap_lock); 96 mutex_lock(&mapping->i_mmap_mutex);
97 new_vma->vm_truncate_count = 0;
98 } 97 }
99 98
100 /* 99 /*
@@ -123,7 +122,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
123 pte_unmap(new_pte - 1); 122 pte_unmap(new_pte - 1);
124 pte_unmap_unlock(old_pte - 1, old_ptl); 123 pte_unmap_unlock(old_pte - 1, old_ptl);
125 if (mapping) 124 if (mapping)
126 spin_unlock(&mapping->i_mmap_lock); 125 mutex_unlock(&mapping->i_mmap_mutex);
127 mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end); 126 mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end);
128} 127}
129 128
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 9109049f0bbc..6e93dc7f2586 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -307,30 +307,7 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
307void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size, 307void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
308 unsigned long align, unsigned long goal) 308 unsigned long align, unsigned long goal)
309{ 309{
310#ifdef MAX_DMA32_PFN
311 unsigned long end_pfn;
312
313 if (WARN_ON_ONCE(slab_is_available()))
314 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
315
316 /* update goal according ...MAX_DMA32_PFN */
317 end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;
318
319 if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
320 (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
321 void *ptr;
322 unsigned long new_goal;
323
324 new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
325 ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
326 new_goal, -1ULL);
327 if (ptr)
328 return ptr;
329 }
330#endif
331
332 return __alloc_bootmem_node(pgdat, size, align, goal); 310 return __alloc_bootmem_node(pgdat, size, align, goal);
333
334} 311}
335 312
336#ifdef CONFIG_SPARSEMEM 313#ifdef CONFIG_SPARSEMEM
diff --git a/mm/nommu.c b/mm/nommu.c
index c4c542c736a9..1fd0c51b10a6 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -680,9 +680,9 @@ static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
680 */ 680 */
681static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) 681static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
682{ 682{
683 struct vm_area_struct *pvma, **pp, *next; 683 struct vm_area_struct *pvma, *prev;
684 struct address_space *mapping; 684 struct address_space *mapping;
685 struct rb_node **p, *parent; 685 struct rb_node **p, *parent, *rb_prev;
686 686
687 kenter(",%p", vma); 687 kenter(",%p", vma);
688 688
@@ -703,7 +703,7 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
703 } 703 }
704 704
705 /* add the VMA to the tree */ 705 /* add the VMA to the tree */
706 parent = NULL; 706 parent = rb_prev = NULL;
707 p = &mm->mm_rb.rb_node; 707 p = &mm->mm_rb.rb_node;
708 while (*p) { 708 while (*p) {
709 parent = *p; 709 parent = *p;
@@ -713,17 +713,20 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
713 * (the latter is necessary as we may get identical VMAs) */ 713 * (the latter is necessary as we may get identical VMAs) */
714 if (vma->vm_start < pvma->vm_start) 714 if (vma->vm_start < pvma->vm_start)
715 p = &(*p)->rb_left; 715 p = &(*p)->rb_left;
716 else if (vma->vm_start > pvma->vm_start) 716 else if (vma->vm_start > pvma->vm_start) {
717 rb_prev = parent;
717 p = &(*p)->rb_right; 718 p = &(*p)->rb_right;
718 else if (vma->vm_end < pvma->vm_end) 719 } else if (vma->vm_end < pvma->vm_end)
719 p = &(*p)->rb_left; 720 p = &(*p)->rb_left;
720 else if (vma->vm_end > pvma->vm_end) 721 else if (vma->vm_end > pvma->vm_end) {
722 rb_prev = parent;
721 p = &(*p)->rb_right; 723 p = &(*p)->rb_right;
722 else if (vma < pvma) 724 } else if (vma < pvma)
723 p = &(*p)->rb_left; 725 p = &(*p)->rb_left;
724 else if (vma > pvma) 726 else if (vma > pvma) {
727 rb_prev = parent;
725 p = &(*p)->rb_right; 728 p = &(*p)->rb_right;
726 else 729 } else
727 BUG(); 730 BUG();
728 } 731 }
729 732
@@ -731,20 +734,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
731 rb_insert_color(&vma->vm_rb, &mm->mm_rb); 734 rb_insert_color(&vma->vm_rb, &mm->mm_rb);
732 735
733 /* add VMA to the VMA list also */ 736 /* add VMA to the VMA list also */
734 for (pp = &mm->mmap; (pvma = *pp); pp = &(*pp)->vm_next) { 737 prev = NULL;
735 if (pvma->vm_start > vma->vm_start) 738 if (rb_prev)
736 break; 739 prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
737 if (pvma->vm_start < vma->vm_start)
738 continue;
739 if (pvma->vm_end < vma->vm_end)
740 break;
741 }
742 740
743 next = *pp; 741 __vma_link_list(mm, vma, prev, parent);
744 *pp = vma;
745 vma->vm_next = next;
746 if (next)
747 next->vm_prev = vma;
748} 742}
749 743
750/* 744/*
@@ -752,7 +746,6 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
752 */ 746 */
753static void delete_vma_from_mm(struct vm_area_struct *vma) 747static void delete_vma_from_mm(struct vm_area_struct *vma)
754{ 748{
755 struct vm_area_struct **pp;
756 struct address_space *mapping; 749 struct address_space *mapping;
757 struct mm_struct *mm = vma->vm_mm; 750 struct mm_struct *mm = vma->vm_mm;
758 751
@@ -775,12 +768,14 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
775 768
776 /* remove from the MM's tree and list */ 769 /* remove from the MM's tree and list */
777 rb_erase(&vma->vm_rb, &mm->mm_rb); 770 rb_erase(&vma->vm_rb, &mm->mm_rb);
778 for (pp = &mm->mmap; *pp; pp = &(*pp)->vm_next) { 771
779 if (*pp == vma) { 772 if (vma->vm_prev)
780 *pp = vma->vm_next; 773 vma->vm_prev->vm_next = vma->vm_next;
781 break; 774 else
782 } 775 mm->mmap = vma->vm_next;
783 } 776
777 if (vma->vm_next)
778 vma->vm_next->vm_prev = vma->vm_prev;
784 779
785 vma->vm_mm = NULL; 780 vma->vm_mm = NULL;
786} 781}
@@ -809,17 +804,15 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
809struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) 804struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
810{ 805{
811 struct vm_area_struct *vma; 806 struct vm_area_struct *vma;
812 struct rb_node *n = mm->mm_rb.rb_node;
813 807
814 /* check the cache first */ 808 /* check the cache first */
815 vma = mm->mmap_cache; 809 vma = mm->mmap_cache;
816 if (vma && vma->vm_start <= addr && vma->vm_end > addr) 810 if (vma && vma->vm_start <= addr && vma->vm_end > addr)
817 return vma; 811 return vma;
818 812
819 /* trawl the tree (there may be multiple mappings in which addr 813 /* trawl the list (there may be multiple mappings in which addr
820 * resides) */ 814 * resides) */
821 for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { 815 for (vma = mm->mmap; vma; vma = vma->vm_next) {
822 vma = rb_entry(n, struct vm_area_struct, vm_rb);
823 if (vma->vm_start > addr) 816 if (vma->vm_start > addr)
824 return NULL; 817 return NULL;
825 if (vma->vm_end > addr) { 818 if (vma->vm_end > addr) {
@@ -859,7 +852,6 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
859 unsigned long len) 852 unsigned long len)
860{ 853{
861 struct vm_area_struct *vma; 854 struct vm_area_struct *vma;
862 struct rb_node *n = mm->mm_rb.rb_node;
863 unsigned long end = addr + len; 855 unsigned long end = addr + len;
864 856
865 /* check the cache first */ 857 /* check the cache first */
@@ -867,10 +859,9 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
867 if (vma && vma->vm_start == addr && vma->vm_end == end) 859 if (vma && vma->vm_start == addr && vma->vm_end == end)
868 return vma; 860 return vma;
869 861
870 /* trawl the tree (there may be multiple mappings in which addr 862 /* trawl the list (there may be multiple mappings in which addr
871 * resides) */ 863 * resides) */
872 for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { 864 for (vma = mm->mmap; vma; vma = vma->vm_next) {
873 vma = rb_entry(n, struct vm_area_struct, vm_rb);
874 if (vma->vm_start < addr) 865 if (vma->vm_start < addr)
875 continue; 866 continue;
876 if (vma->vm_start > addr) 867 if (vma->vm_start > addr)
@@ -1133,7 +1124,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
1133 unsigned long capabilities) 1124 unsigned long capabilities)
1134{ 1125{
1135 struct page *pages; 1126 struct page *pages;
1136 unsigned long total, point, n, rlen; 1127 unsigned long total, point, n;
1137 void *base; 1128 void *base;
1138 int ret, order; 1129 int ret, order;
1139 1130
@@ -1157,13 +1148,12 @@ static int do_mmap_private(struct vm_area_struct *vma,
1157 * make a private copy of the data and map that instead */ 1148 * make a private copy of the data and map that instead */
1158 } 1149 }
1159 1150
1160 rlen = PAGE_ALIGN(len);
1161 1151
1162 /* allocate some memory to hold the mapping 1152 /* allocate some memory to hold the mapping
1163 * - note that this may not return a page-aligned address if the object 1153 * - note that this may not return a page-aligned address if the object
1164 * we're allocating is smaller than a page 1154 * we're allocating is smaller than a page
1165 */ 1155 */
1166 order = get_order(rlen); 1156 order = get_order(len);
1167 kdebug("alloc order %d for %lx", order, len); 1157 kdebug("alloc order %d for %lx", order, len);
1168 1158
1169 pages = alloc_pages(GFP_KERNEL, order); 1159 pages = alloc_pages(GFP_KERNEL, order);
@@ -1173,7 +1163,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
1173 total = 1 << order; 1163 total = 1 << order;
1174 atomic_long_add(total, &mmap_pages_allocated); 1164 atomic_long_add(total, &mmap_pages_allocated);
1175 1165
1176 point = rlen >> PAGE_SHIFT; 1166 point = len >> PAGE_SHIFT;
1177 1167
1178 /* we allocated a power-of-2 sized page set, so we may want to trim off 1168 /* we allocated a power-of-2 sized page set, so we may want to trim off
1179 * the excess */ 1169 * the excess */
@@ -1195,7 +1185,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
1195 base = page_address(pages); 1185 base = page_address(pages);
1196 region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY; 1186 region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
1197 region->vm_start = (unsigned long) base; 1187 region->vm_start = (unsigned long) base;
1198 region->vm_end = region->vm_start + rlen; 1188 region->vm_end = region->vm_start + len;
1199 region->vm_top = region->vm_start + (total << PAGE_SHIFT); 1189 region->vm_top = region->vm_start + (total << PAGE_SHIFT);
1200 1190
1201 vma->vm_start = region->vm_start; 1191 vma->vm_start = region->vm_start;
@@ -1211,22 +1201,22 @@ static int do_mmap_private(struct vm_area_struct *vma,
1211 1201
1212 old_fs = get_fs(); 1202 old_fs = get_fs();
1213 set_fs(KERNEL_DS); 1203 set_fs(KERNEL_DS);
1214 ret = vma->vm_file->f_op->read(vma->vm_file, base, rlen, &fpos); 1204 ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
1215 set_fs(old_fs); 1205 set_fs(old_fs);
1216 1206
1217 if (ret < 0) 1207 if (ret < 0)
1218 goto error_free; 1208 goto error_free;
1219 1209
1220 /* clear the last little bit */ 1210 /* clear the last little bit */
1221 if (ret < rlen) 1211 if (ret < len)
1222 memset(base + ret, 0, rlen - ret); 1212 memset(base + ret, 0, len - ret);
1223 1213
1224 } 1214 }
1225 1215
1226 return 0; 1216 return 0;
1227 1217
1228error_free: 1218error_free:
1229 free_page_series(region->vm_start, region->vm_end); 1219 free_page_series(region->vm_start, region->vm_top);
1230 region->vm_start = vma->vm_start = 0; 1220 region->vm_start = vma->vm_start = 0;
1231 region->vm_end = vma->vm_end = 0; 1221 region->vm_end = vma->vm_end = 0;
1232 region->vm_top = 0; 1222 region->vm_top = 0;
@@ -1235,7 +1225,7 @@ error_free:
1235enomem: 1225enomem:
1236 printk("Allocation of length %lu from process %d (%s) failed\n", 1226 printk("Allocation of length %lu from process %d (%s) failed\n",
1237 len, current->pid, current->comm); 1227 len, current->pid, current->comm);
1238 show_free_areas(); 1228 show_free_areas(0);
1239 return -ENOMEM; 1229 return -ENOMEM;
1240} 1230}
1241 1231
@@ -1268,6 +1258,7 @@ unsigned long do_mmap_pgoff(struct file *file,
1268 1258
1269 /* we ignore the address hint */ 1259 /* we ignore the address hint */
1270 addr = 0; 1260 addr = 0;
1261 len = PAGE_ALIGN(len);
1271 1262
1272 /* we've determined that we can make the mapping, now translate what we 1263 /* we've determined that we can make the mapping, now translate what we
1273 * now know into VMA flags */ 1264 * now know into VMA flags */
@@ -1385,15 +1376,15 @@ unsigned long do_mmap_pgoff(struct file *file,
1385 if (capabilities & BDI_CAP_MAP_DIRECT) { 1376 if (capabilities & BDI_CAP_MAP_DIRECT) {
1386 addr = file->f_op->get_unmapped_area(file, addr, len, 1377 addr = file->f_op->get_unmapped_area(file, addr, len,
1387 pgoff, flags); 1378 pgoff, flags);
1388 if (IS_ERR((void *) addr)) { 1379 if (IS_ERR_VALUE(addr)) {
1389 ret = addr; 1380 ret = addr;
1390 if (ret != (unsigned long) -ENOSYS) 1381 if (ret != -ENOSYS)
1391 goto error_just_free; 1382 goto error_just_free;
1392 1383
1393 /* the driver refused to tell us where to site 1384 /* the driver refused to tell us where to site
1394 * the mapping so we'll have to attempt to copy 1385 * the mapping so we'll have to attempt to copy
1395 * it */ 1386 * it */
1396 ret = (unsigned long) -ENODEV; 1387 ret = -ENODEV;
1397 if (!(capabilities & BDI_CAP_MAP_COPY)) 1388 if (!(capabilities & BDI_CAP_MAP_COPY))
1398 goto error_just_free; 1389 goto error_just_free;
1399 1390
@@ -1468,14 +1459,14 @@ error_getting_vma:
1468 printk(KERN_WARNING "Allocation of vma for %lu byte allocation" 1459 printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
1469 " from process %d failed\n", 1460 " from process %d failed\n",
1470 len, current->pid); 1461 len, current->pid);
1471 show_free_areas(); 1462 show_free_areas(0);
1472 return -ENOMEM; 1463 return -ENOMEM;
1473 1464
1474error_getting_region: 1465error_getting_region:
1475 printk(KERN_WARNING "Allocation of vm region for %lu byte allocation" 1466 printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
1476 " from process %d failed\n", 1467 " from process %d failed\n",
1477 len, current->pid); 1468 len, current->pid);
1478 show_free_areas(); 1469 show_free_areas(0);
1479 return -ENOMEM; 1470 return -ENOMEM;
1480} 1471}
1481EXPORT_SYMBOL(do_mmap_pgoff); 1472EXPORT_SYMBOL(do_mmap_pgoff);
@@ -1644,15 +1635,17 @@ static int shrink_vma(struct mm_struct *mm,
1644int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) 1635int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
1645{ 1636{
1646 struct vm_area_struct *vma; 1637 struct vm_area_struct *vma;
1647 struct rb_node *rb; 1638 unsigned long end;
1648 unsigned long end = start + len;
1649 int ret; 1639 int ret;
1650 1640
1651 kenter(",%lx,%zx", start, len); 1641 kenter(",%lx,%zx", start, len);
1652 1642
1643 len = PAGE_ALIGN(len);
1653 if (len == 0) 1644 if (len == 0)
1654 return -EINVAL; 1645 return -EINVAL;
1655 1646
1647 end = start + len;
1648
1656 /* find the first potentially overlapping VMA */ 1649 /* find the first potentially overlapping VMA */
1657 vma = find_vma(mm, start); 1650 vma = find_vma(mm, start);
1658 if (!vma) { 1651 if (!vma) {
@@ -1677,9 +1670,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
1677 } 1670 }
1678 if (end == vma->vm_end) 1671 if (end == vma->vm_end)
1679 goto erase_whole_vma; 1672 goto erase_whole_vma;
1680 rb = rb_next(&vma->vm_rb); 1673 vma = vma->vm_next;
1681 vma = rb_entry(rb, struct vm_area_struct, vm_rb); 1674 } while (vma);
1682 } while (rb);
1683 kleave(" = -EINVAL [split file]"); 1675 kleave(" = -EINVAL [split file]");
1684 return -EINVAL; 1676 return -EINVAL;
1685 } else { 1677 } else {
@@ -1773,6 +1765,8 @@ unsigned long do_mremap(unsigned long addr,
1773 struct vm_area_struct *vma; 1765 struct vm_area_struct *vma;
1774 1766
1775 /* insanity checks first */ 1767 /* insanity checks first */
1768 old_len = PAGE_ALIGN(old_len);
1769 new_len = PAGE_ALIGN(new_len);
1776 if (old_len == 0 || new_len == 0) 1770 if (old_len == 0 || new_len == 0)
1777 return (unsigned long) -EINVAL; 1771 return (unsigned long) -EINVAL;
1778 1772
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f52e85c80e8d..e4b0991ca351 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -38,6 +38,33 @@ int sysctl_oom_kill_allocating_task;
38int sysctl_oom_dump_tasks = 1; 38int sysctl_oom_dump_tasks = 1;
39static DEFINE_SPINLOCK(zone_scan_lock); 39static DEFINE_SPINLOCK(zone_scan_lock);
40 40
41/**
42 * test_set_oom_score_adj() - set current's oom_score_adj and return old value
43 * @new_val: new oom_score_adj value
44 *
45 * Sets the oom_score_adj value for current to @new_val with proper
46 * synchronization and returns the old value. Usually used to temporarily
47 * set a value, save the old value in the caller, and then reinstate it later.
48 */
49int test_set_oom_score_adj(int new_val)
50{
51 struct sighand_struct *sighand = current->sighand;
52 int old_val;
53
54 spin_lock_irq(&sighand->siglock);
55 old_val = current->signal->oom_score_adj;
56 if (new_val != old_val) {
57 if (new_val == OOM_SCORE_ADJ_MIN)
58 atomic_inc(&current->mm->oom_disable_count);
59 else if (old_val == OOM_SCORE_ADJ_MIN)
60 atomic_dec(&current->mm->oom_disable_count);
61 current->signal->oom_score_adj = new_val;
62 }
63 spin_unlock_irq(&sighand->siglock);
64
65 return old_val;
66}
67
41#ifdef CONFIG_NUMA 68#ifdef CONFIG_NUMA
42/** 69/**
43 * has_intersects_mems_allowed() - check task eligiblity for kill 70 * has_intersects_mems_allowed() - check task eligiblity for kill
@@ -155,15 +182,6 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
155 } 182 }
156 183
157 /* 184 /*
158 * When the PF_OOM_ORIGIN bit is set, it indicates the task should have
159 * priority for oom killing.
160 */
161 if (p->flags & PF_OOM_ORIGIN) {
162 task_unlock(p);
163 return 1000;
164 }
165
166 /*
167 * The memory controller may have a limit of 0 bytes, so avoid a divide 185 * The memory controller may have a limit of 0 bytes, so avoid a divide
168 * by zero, if necessary. 186 * by zero, if necessary.
169 */ 187 */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9d5498e2d0f5..2a00f17c3bf4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -30,6 +30,7 @@
30#include <linux/pagevec.h> 30#include <linux/pagevec.h>
31#include <linux/blkdev.h> 31#include <linux/blkdev.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/ratelimit.h>
33#include <linux/oom.h> 34#include <linux/oom.h>
34#include <linux/notifier.h> 35#include <linux/notifier.h>
35#include <linux/topology.h> 36#include <linux/topology.h>
@@ -39,6 +40,7 @@
39#include <linux/memory_hotplug.h> 40#include <linux/memory_hotplug.h>
40#include <linux/nodemask.h> 41#include <linux/nodemask.h>
41#include <linux/vmalloc.h> 42#include <linux/vmalloc.h>
43#include <linux/vmstat.h>
42#include <linux/mempolicy.h> 44#include <linux/mempolicy.h>
43#include <linux/stop_machine.h> 45#include <linux/stop_machine.h>
44#include <linux/sort.h> 46#include <linux/sort.h>
@@ -1735,6 +1737,45 @@ static inline bool should_suppress_show_mem(void)
1735 return ret; 1737 return ret;
1736} 1738}
1737 1739
1740static DEFINE_RATELIMIT_STATE(nopage_rs,
1741 DEFAULT_RATELIMIT_INTERVAL,
1742 DEFAULT_RATELIMIT_BURST);
1743
1744void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
1745{
1746 va_list args;
1747 unsigned int filter = SHOW_MEM_FILTER_NODES;
1748
1749 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
1750 return;
1751
1752 /*
1753 * This documents exceptions given to allocations in certain
1754 * contexts that are allowed to allocate outside current's set
1755 * of allowed nodes.
1756 */
1757 if (!(gfp_mask & __GFP_NOMEMALLOC))
1758 if (test_thread_flag(TIF_MEMDIE) ||
1759 (current->flags & (PF_MEMALLOC | PF_EXITING)))
1760 filter &= ~SHOW_MEM_FILTER_NODES;
1761 if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
1762 filter &= ~SHOW_MEM_FILTER_NODES;
1763
1764 if (fmt) {
1765 printk(KERN_WARNING);
1766 va_start(args, fmt);
1767 vprintk(fmt, args);
1768 va_end(args);
1769 }
1770
1771 pr_warning("%s: page allocation failure: order:%d, mode:0x%x\n",
1772 current->comm, order, gfp_mask);
1773
1774 dump_stack();
1775 if (!should_suppress_show_mem())
1776 show_mem(filter);
1777}
1778
1738static inline int 1779static inline int
1739should_alloc_retry(gfp_t gfp_mask, unsigned int order, 1780should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1740 unsigned long pages_reclaimed) 1781 unsigned long pages_reclaimed)
@@ -2065,6 +2106,7 @@ restart:
2065 first_zones_zonelist(zonelist, high_zoneidx, NULL, 2106 first_zones_zonelist(zonelist, high_zoneidx, NULL,
2066 &preferred_zone); 2107 &preferred_zone);
2067 2108
2109rebalance:
2068 /* This is the last chance, in general, before the goto nopage. */ 2110 /* This is the last chance, in general, before the goto nopage. */
2069 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, 2111 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
2070 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS, 2112 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
@@ -2072,7 +2114,6 @@ restart:
2072 if (page) 2114 if (page)
2073 goto got_pg; 2115 goto got_pg;
2074 2116
2075rebalance:
2076 /* Allocate without watermarks if the context allows */ 2117 /* Allocate without watermarks if the context allows */
2077 if (alloc_flags & ALLOC_NO_WATERMARKS) { 2118 if (alloc_flags & ALLOC_NO_WATERMARKS) {
2078 page = __alloc_pages_high_priority(gfp_mask, order, 2119 page = __alloc_pages_high_priority(gfp_mask, order,
@@ -2106,7 +2147,7 @@ rebalance:
2106 sync_migration); 2147 sync_migration);
2107 if (page) 2148 if (page)
2108 goto got_pg; 2149 goto got_pg;
2109 sync_migration = !(gfp_mask & __GFP_NO_KSWAPD); 2150 sync_migration = true;
2110 2151
2111 /* Try direct reclaim and then allocating */ 2152 /* Try direct reclaim and then allocating */
2112 page = __alloc_pages_direct_reclaim(gfp_mask, order, 2153 page = __alloc_pages_direct_reclaim(gfp_mask, order,
@@ -2177,27 +2218,7 @@ rebalance:
2177 } 2218 }
2178 2219
2179nopage: 2220nopage:
2180 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { 2221 warn_alloc_failed(gfp_mask, order, NULL);
2181 unsigned int filter = SHOW_MEM_FILTER_NODES;
2182
2183 /*
2184 * This documents exceptions given to allocations in certain
2185 * contexts that are allowed to allocate outside current's set
2186 * of allowed nodes.
2187 */
2188 if (!(gfp_mask & __GFP_NOMEMALLOC))
2189 if (test_thread_flag(TIF_MEMDIE) ||
2190 (current->flags & (PF_MEMALLOC | PF_EXITING)))
2191 filter &= ~SHOW_MEM_FILTER_NODES;
2192 if (in_interrupt() || !wait)
2193 filter &= ~SHOW_MEM_FILTER_NODES;
2194
2195 pr_warning("%s: page allocation failure. order:%d, mode:0x%x\n",
2196 current->comm, order, gfp_mask);
2197 dump_stack();
2198 if (!should_suppress_show_mem())
2199 show_mem(filter);
2200 }
2201 return page; 2222 return page;
2202got_pg: 2223got_pg:
2203 if (kmemcheck_enabled) 2224 if (kmemcheck_enabled)
@@ -2226,6 +2247,10 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2226 2247
2227 if (should_fail_alloc_page(gfp_mask, order)) 2248 if (should_fail_alloc_page(gfp_mask, order))
2228 return NULL; 2249 return NULL;
2250#ifndef CONFIG_ZONE_DMA
2251 if (WARN_ON_ONCE(gfp_mask & __GFP_DMA))
2252 return NULL;
2253#endif
2229 2254
2230 /* 2255 /*
2231 * Check the zones suitable for the gfp_mask contain at least one 2256 * Check the zones suitable for the gfp_mask contain at least one
@@ -2473,10 +2498,10 @@ void si_meminfo_node(struct sysinfo *val, int nid)
2473#endif 2498#endif
2474 2499
2475/* 2500/*
2476 * Determine whether the zone's node should be displayed or not, depending on 2501 * Determine whether the node should be displayed or not, depending on whether
2477 * whether SHOW_MEM_FILTER_NODES was passed to __show_free_areas(). 2502 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
2478 */ 2503 */
2479static bool skip_free_areas_zone(unsigned int flags, const struct zone *zone) 2504bool skip_free_areas_node(unsigned int flags, int nid)
2480{ 2505{
2481 bool ret = false; 2506 bool ret = false;
2482 2507
@@ -2484,8 +2509,7 @@ static bool skip_free_areas_zone(unsigned int flags, const struct zone *zone)
2484 goto out; 2509 goto out;
2485 2510
2486 get_mems_allowed(); 2511 get_mems_allowed();
2487 ret = !node_isset(zone->zone_pgdat->node_id, 2512 ret = !node_isset(nid, cpuset_current_mems_allowed);
2488 cpuset_current_mems_allowed);
2489 put_mems_allowed(); 2513 put_mems_allowed();
2490out: 2514out:
2491 return ret; 2515 return ret;
@@ -2500,13 +2524,13 @@ out:
2500 * Suppresses nodes that are not allowed by current's cpuset if 2524 * Suppresses nodes that are not allowed by current's cpuset if
2501 * SHOW_MEM_FILTER_NODES is passed. 2525 * SHOW_MEM_FILTER_NODES is passed.
2502 */ 2526 */
2503void __show_free_areas(unsigned int filter) 2527void show_free_areas(unsigned int filter)
2504{ 2528{
2505 int cpu; 2529 int cpu;
2506 struct zone *zone; 2530 struct zone *zone;
2507 2531
2508 for_each_populated_zone(zone) { 2532 for_each_populated_zone(zone) {
2509 if (skip_free_areas_zone(filter, zone)) 2533 if (skip_free_areas_node(filter, zone_to_nid(zone)))
2510 continue; 2534 continue;
2511 show_node(zone); 2535 show_node(zone);
2512 printk("%s per-cpu:\n", zone->name); 2536 printk("%s per-cpu:\n", zone->name);
@@ -2549,7 +2573,7 @@ void __show_free_areas(unsigned int filter)
2549 for_each_populated_zone(zone) { 2573 for_each_populated_zone(zone) {
2550 int i; 2574 int i;
2551 2575
2552 if (skip_free_areas_zone(filter, zone)) 2576 if (skip_free_areas_node(filter, zone_to_nid(zone)))
2553 continue; 2577 continue;
2554 show_node(zone); 2578 show_node(zone);
2555 printk("%s" 2579 printk("%s"
@@ -2618,7 +2642,7 @@ void __show_free_areas(unsigned int filter)
2618 for_each_populated_zone(zone) { 2642 for_each_populated_zone(zone) {
2619 unsigned long nr[MAX_ORDER], flags, order, total = 0; 2643 unsigned long nr[MAX_ORDER], flags, order, total = 0;
2620 2644
2621 if (skip_free_areas_zone(filter, zone)) 2645 if (skip_free_areas_node(filter, zone_to_nid(zone)))
2622 continue; 2646 continue;
2623 show_node(zone); 2647 show_node(zone);
2624 printk("%s: ", zone->name); 2648 printk("%s: ", zone->name);
@@ -2639,11 +2663,6 @@ void __show_free_areas(unsigned int filter)
2639 show_swap_cache_info(); 2663 show_swap_cache_info();
2640} 2664}
2641 2665
2642void show_free_areas(void)
2643{
2644 __show_free_areas(0);
2645}
2646
2647static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 2666static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2648{ 2667{
2649 zoneref->zone = zone; 2668 zoneref->zone = zone;
@@ -3314,6 +3333,20 @@ static inline unsigned long wait_table_bits(unsigned long size)
3314#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) 3333#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
3315 3334
3316/* 3335/*
3336 * Check if a pageblock contains reserved pages
3337 */
3338static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
3339{
3340 unsigned long pfn;
3341
3342 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3343 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
3344 return 1;
3345 }
3346 return 0;
3347}
3348
3349/*
3317 * Mark a number of pageblocks as MIGRATE_RESERVE. The number 3350 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
3318 * of blocks reserved is based on min_wmark_pages(zone). The memory within 3351 * of blocks reserved is based on min_wmark_pages(zone). The memory within
3319 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes 3352 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
@@ -3322,7 +3355,7 @@ static inline unsigned long wait_table_bits(unsigned long size)
3322 */ 3355 */
3323static void setup_zone_migrate_reserve(struct zone *zone) 3356static void setup_zone_migrate_reserve(struct zone *zone)
3324{ 3357{
3325 unsigned long start_pfn, pfn, end_pfn; 3358 unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
3326 struct page *page; 3359 struct page *page;
3327 unsigned long block_migratetype; 3360 unsigned long block_migratetype;
3328 int reserve; 3361 int reserve;
@@ -3352,7 +3385,8 @@ static void setup_zone_migrate_reserve(struct zone *zone)
3352 continue; 3385 continue;
3353 3386
3354 /* Blocks with reserved pages will never free, skip them. */ 3387 /* Blocks with reserved pages will never free, skip them. */
3355 if (PageReserved(page)) 3388 block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
3389 if (pageblock_is_reserved(pfn, block_end_pfn))
3356 continue; 3390 continue;
3357 3391
3358 block_migratetype = get_pageblock_migratetype(page); 3392 block_migratetype = get_pageblock_migratetype(page);
@@ -5100,7 +5134,7 @@ void setup_per_zone_wmarks(void)
5100 * 1TB 101 10GB 5134 * 1TB 101 10GB
5101 * 10TB 320 32GB 5135 * 10TB 320 32GB
5102 */ 5136 */
5103void calculate_zone_inactive_ratio(struct zone *zone) 5137static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
5104{ 5138{
5105 unsigned int gb, ratio; 5139 unsigned int gb, ratio;
5106 5140
@@ -5114,7 +5148,7 @@ void calculate_zone_inactive_ratio(struct zone *zone)
5114 zone->inactive_ratio = ratio; 5148 zone->inactive_ratio = ratio;
5115} 5149}
5116 5150
5117static void __init setup_per_zone_inactive_ratio(void) 5151static void __meminit setup_per_zone_inactive_ratio(void)
5118{ 5152{
5119 struct zone *zone; 5153 struct zone *zone;
5120 5154
@@ -5146,7 +5180,7 @@ static void __init setup_per_zone_inactive_ratio(void)
5146 * 8192MB: 11584k 5180 * 8192MB: 11584k
5147 * 16384MB: 16384k 5181 * 16384MB: 16384k
5148 */ 5182 */
5149static int __init init_per_zone_wmark_min(void) 5183int __meminit init_per_zone_wmark_min(void)
5150{ 5184{
5151 unsigned long lowmem_kbytes; 5185 unsigned long lowmem_kbytes;
5152 5186
@@ -5158,6 +5192,7 @@ static int __init init_per_zone_wmark_min(void)
5158 if (min_free_kbytes > 65536) 5192 if (min_free_kbytes > 65536)
5159 min_free_kbytes = 65536; 5193 min_free_kbytes = 65536;
5160 setup_per_zone_wmarks(); 5194 setup_per_zone_wmarks();
5195 refresh_zone_stat_thresholds();
5161 setup_per_zone_lowmem_reserve(); 5196 setup_per_zone_lowmem_reserve();
5162 setup_per_zone_inactive_ratio(); 5197 setup_per_zone_inactive_ratio();
5163 return 0; 5198 return 0;
@@ -5508,10 +5543,8 @@ int set_migratetype_isolate(struct page *page)
5508 struct memory_isolate_notify arg; 5543 struct memory_isolate_notify arg;
5509 int notifier_ret; 5544 int notifier_ret;
5510 int ret = -EBUSY; 5545 int ret = -EBUSY;
5511 int zone_idx;
5512 5546
5513 zone = page_zone(page); 5547 zone = page_zone(page);
5514 zone_idx = zone_idx(zone);
5515 5548
5516 spin_lock_irqsave(&zone->lock, flags); 5549 spin_lock_irqsave(&zone->lock, flags);
5517 5550
diff --git a/mm/readahead.c b/mm/readahead.c
index 2c0cc489e288..867f9dd82dcd 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -180,7 +180,7 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
180 if (page) 180 if (page)
181 continue; 181 continue;
182 182
183 page = page_cache_alloc_cold(mapping); 183 page = page_cache_alloc_readahead(mapping);
184 if (!page) 184 if (!page)
185 break; 185 break;
186 page->index = page_offset; 186 page->index = page_offset;
diff --git a/mm/rmap.c b/mm/rmap.c
index 522e4a93cadd..3a39b518a653 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -24,8 +24,8 @@
24 * inode->i_alloc_sem (vmtruncate_range) 24 * inode->i_alloc_sem (vmtruncate_range)
25 * mm->mmap_sem 25 * mm->mmap_sem
26 * page->flags PG_locked (lock_page) 26 * page->flags PG_locked (lock_page)
27 * mapping->i_mmap_lock 27 * mapping->i_mmap_mutex
28 * anon_vma->lock 28 * anon_vma->mutex
29 * mm->page_table_lock or pte_lock 29 * mm->page_table_lock or pte_lock
30 * zone->lru_lock (in mark_page_accessed, isolate_lru_page) 30 * zone->lru_lock (in mark_page_accessed, isolate_lru_page)
31 * swap_lock (in swap_duplicate, swap_info_get) 31 * swap_lock (in swap_duplicate, swap_info_get)
@@ -40,7 +40,7 @@
40 * 40 *
41 * (code doesn't rely on that order so it could be switched around) 41 * (code doesn't rely on that order so it could be switched around)
42 * ->tasklist_lock 42 * ->tasklist_lock
43 * anon_vma->lock (memory_failure, collect_procs_anon) 43 * anon_vma->mutex (memory_failure, collect_procs_anon)
44 * pte map lock 44 * pte map lock
45 */ 45 */
46 46
@@ -86,6 +86,29 @@ static inline struct anon_vma *anon_vma_alloc(void)
86static inline void anon_vma_free(struct anon_vma *anon_vma) 86static inline void anon_vma_free(struct anon_vma *anon_vma)
87{ 87{
88 VM_BUG_ON(atomic_read(&anon_vma->refcount)); 88 VM_BUG_ON(atomic_read(&anon_vma->refcount));
89
90 /*
91 * Synchronize against page_lock_anon_vma() such that
92 * we can safely hold the lock without the anon_vma getting
93 * freed.
94 *
95 * Relies on the full mb implied by the atomic_dec_and_test() from
96 * put_anon_vma() against the acquire barrier implied by
97 * mutex_trylock() from page_lock_anon_vma(). This orders:
98 *
99 * page_lock_anon_vma() VS put_anon_vma()
100 * mutex_trylock() atomic_dec_and_test()
101 * LOCK MB
102 * atomic_read() mutex_is_locked()
103 *
104 * LOCK should suffice since the actual taking of the lock must
105 * happen _before_ what follows.
106 */
107 if (mutex_is_locked(&anon_vma->root->mutex)) {
108 anon_vma_lock(anon_vma);
109 anon_vma_unlock(anon_vma);
110 }
111
89 kmem_cache_free(anon_vma_cachep, anon_vma); 112 kmem_cache_free(anon_vma_cachep, anon_vma);
90} 113}
91 114
@@ -307,7 +330,7 @@ static void anon_vma_ctor(void *data)
307{ 330{
308 struct anon_vma *anon_vma = data; 331 struct anon_vma *anon_vma = data;
309 332
310 spin_lock_init(&anon_vma->lock); 333 mutex_init(&anon_vma->mutex);
311 atomic_set(&anon_vma->refcount, 0); 334 atomic_set(&anon_vma->refcount, 0);
312 INIT_LIST_HEAD(&anon_vma->head); 335 INIT_LIST_HEAD(&anon_vma->head);
313} 336}
@@ -320,12 +343,26 @@ void __init anon_vma_init(void)
320} 343}
321 344
322/* 345/*
323 * Getting a lock on a stable anon_vma from a page off the LRU is 346 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
324 * tricky: page_lock_anon_vma rely on RCU to guard against the races. 347 *
348 * Since there is no serialization what so ever against page_remove_rmap()
349 * the best this function can do is return a locked anon_vma that might
350 * have been relevant to this page.
351 *
352 * The page might have been remapped to a different anon_vma or the anon_vma
353 * returned may already be freed (and even reused).
354 *
355 * All users of this function must be very careful when walking the anon_vma
356 * chain and verify that the page in question is indeed mapped in it
357 * [ something equivalent to page_mapped_in_vma() ].
358 *
359 * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
360 * that the anon_vma pointer from page->mapping is valid if there is a
361 * mapcount, we can dereference the anon_vma after observing those.
325 */ 362 */
326struct anon_vma *__page_lock_anon_vma(struct page *page) 363struct anon_vma *page_get_anon_vma(struct page *page)
327{ 364{
328 struct anon_vma *anon_vma, *root_anon_vma; 365 struct anon_vma *anon_vma = NULL;
329 unsigned long anon_mapping; 366 unsigned long anon_mapping;
330 367
331 rcu_read_lock(); 368 rcu_read_lock();
@@ -336,32 +373,97 @@ struct anon_vma *__page_lock_anon_vma(struct page *page)
336 goto out; 373 goto out;
337 374
338 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); 375 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
339 root_anon_vma = ACCESS_ONCE(anon_vma->root); 376 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
340 spin_lock(&root_anon_vma->lock); 377 anon_vma = NULL;
378 goto out;
379 }
341 380
342 /* 381 /*
343 * If this page is still mapped, then its anon_vma cannot have been 382 * If this page is still mapped, then its anon_vma cannot have been
344 * freed. But if it has been unmapped, we have no security against 383 * freed. But if it has been unmapped, we have no security against the
345 * the anon_vma structure being freed and reused (for another anon_vma: 384 * anon_vma structure being freed and reused (for another anon_vma:
346 * SLAB_DESTROY_BY_RCU guarantees that - so the spin_lock above cannot 385 * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
347 * corrupt): with anon_vma_prepare() or anon_vma_fork() redirecting 386 * above cannot corrupt).
348 * anon_vma->root before page_unlock_anon_vma() is called to unlock.
349 */ 387 */
350 if (page_mapped(page)) 388 if (!page_mapped(page)) {
351 return anon_vma; 389 put_anon_vma(anon_vma);
390 anon_vma = NULL;
391 }
392out:
393 rcu_read_unlock();
394
395 return anon_vma;
396}
397
398/*
399 * Similar to page_get_anon_vma() except it locks the anon_vma.
400 *
401 * Its a little more complex as it tries to keep the fast path to a single
402 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
403 * reference like with page_get_anon_vma() and then block on the mutex.
404 */
405struct anon_vma *page_lock_anon_vma(struct page *page)
406{
407 struct anon_vma *anon_vma = NULL;
408 unsigned long anon_mapping;
409
410 rcu_read_lock();
411 anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
412 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
413 goto out;
414 if (!page_mapped(page))
415 goto out;
416
417 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
418 if (mutex_trylock(&anon_vma->root->mutex)) {
419 /*
420 * If we observe a !0 refcount, then holding the lock ensures
421 * the anon_vma will not go away, see __put_anon_vma().
422 */
423 if (!atomic_read(&anon_vma->refcount)) {
424 anon_vma_unlock(anon_vma);
425 anon_vma = NULL;
426 }
427 goto out;
428 }
429
430 /* trylock failed, we got to sleep */
431 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
432 anon_vma = NULL;
433 goto out;
434 }
435
436 if (!page_mapped(page)) {
437 put_anon_vma(anon_vma);
438 anon_vma = NULL;
439 goto out;
440 }
441
442 /* we pinned the anon_vma, its safe to sleep */
443 rcu_read_unlock();
444 anon_vma_lock(anon_vma);
445
446 if (atomic_dec_and_test(&anon_vma->refcount)) {
447 /*
448 * Oops, we held the last refcount, release the lock
449 * and bail -- can't simply use put_anon_vma() because
450 * we'll deadlock on the anon_vma_lock() recursion.
451 */
452 anon_vma_unlock(anon_vma);
453 __put_anon_vma(anon_vma);
454 anon_vma = NULL;
455 }
456
457 return anon_vma;
352 458
353 spin_unlock(&root_anon_vma->lock);
354out: 459out:
355 rcu_read_unlock(); 460 rcu_read_unlock();
356 return NULL; 461 return anon_vma;
357} 462}
358 463
359void page_unlock_anon_vma(struct anon_vma *anon_vma) 464void page_unlock_anon_vma(struct anon_vma *anon_vma)
360 __releases(&anon_vma->root->lock)
361 __releases(RCU)
362{ 465{
363 anon_vma_unlock(anon_vma); 466 anon_vma_unlock(anon_vma);
364 rcu_read_unlock();
365} 467}
366 468
367/* 469/*
@@ -646,14 +748,14 @@ static int page_referenced_file(struct page *page,
646 * The page lock not only makes sure that page->mapping cannot 748 * The page lock not only makes sure that page->mapping cannot
647 * suddenly be NULLified by truncation, it makes sure that the 749 * suddenly be NULLified by truncation, it makes sure that the
648 * structure at mapping cannot be freed and reused yet, 750 * structure at mapping cannot be freed and reused yet,
649 * so we can safely take mapping->i_mmap_lock. 751 * so we can safely take mapping->i_mmap_mutex.
650 */ 752 */
651 BUG_ON(!PageLocked(page)); 753 BUG_ON(!PageLocked(page));
652 754
653 spin_lock(&mapping->i_mmap_lock); 755 mutex_lock(&mapping->i_mmap_mutex);
654 756
655 /* 757 /*
656 * i_mmap_lock does not stabilize mapcount at all, but mapcount 758 * i_mmap_mutex does not stabilize mapcount at all, but mapcount
657 * is more likely to be accurate if we note it after spinning. 759 * is more likely to be accurate if we note it after spinning.
658 */ 760 */
659 mapcount = page_mapcount(page); 761 mapcount = page_mapcount(page);
@@ -675,7 +777,7 @@ static int page_referenced_file(struct page *page,
675 break; 777 break;
676 } 778 }
677 779
678 spin_unlock(&mapping->i_mmap_lock); 780 mutex_unlock(&mapping->i_mmap_mutex);
679 return referenced; 781 return referenced;
680} 782}
681 783
@@ -762,7 +864,7 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
762 864
763 BUG_ON(PageAnon(page)); 865 BUG_ON(PageAnon(page));
764 866
765 spin_lock(&mapping->i_mmap_lock); 867 mutex_lock(&mapping->i_mmap_mutex);
766 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 868 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
767 if (vma->vm_flags & VM_SHARED) { 869 if (vma->vm_flags & VM_SHARED) {
768 unsigned long address = vma_address(page, vma); 870 unsigned long address = vma_address(page, vma);
@@ -771,7 +873,7 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
771 ret += page_mkclean_one(page, vma, address); 873 ret += page_mkclean_one(page, vma, address);
772 } 874 }
773 } 875 }
774 spin_unlock(&mapping->i_mmap_lock); 876 mutex_unlock(&mapping->i_mmap_mutex);
775 return ret; 877 return ret;
776} 878}
777 879
@@ -1119,7 +1221,7 @@ out_mlock:
1119 /* 1221 /*
1120 * We need mmap_sem locking, Otherwise VM_LOCKED check makes 1222 * We need mmap_sem locking, Otherwise VM_LOCKED check makes
1121 * unstable result and race. Plus, We can't wait here because 1223 * unstable result and race. Plus, We can't wait here because
1122 * we now hold anon_vma->lock or mapping->i_mmap_lock. 1224 * we now hold anon_vma->mutex or mapping->i_mmap_mutex.
1123 * if trylock failed, the page remain in evictable lru and later 1225 * if trylock failed, the page remain in evictable lru and later
1124 * vmscan could retry to move the page to unevictable lru if the 1226 * vmscan could retry to move the page to unevictable lru if the
1125 * page is actually mlocked. 1227 * page is actually mlocked.
@@ -1345,7 +1447,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
1345 unsigned long max_nl_size = 0; 1447 unsigned long max_nl_size = 0;
1346 unsigned int mapcount; 1448 unsigned int mapcount;
1347 1449
1348 spin_lock(&mapping->i_mmap_lock); 1450 mutex_lock(&mapping->i_mmap_mutex);
1349 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 1451 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
1350 unsigned long address = vma_address(page, vma); 1452 unsigned long address = vma_address(page, vma);
1351 if (address == -EFAULT) 1453 if (address == -EFAULT)
@@ -1391,7 +1493,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
1391 mapcount = page_mapcount(page); 1493 mapcount = page_mapcount(page);
1392 if (!mapcount) 1494 if (!mapcount)
1393 goto out; 1495 goto out;
1394 cond_resched_lock(&mapping->i_mmap_lock); 1496 cond_resched();
1395 1497
1396 max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK; 1498 max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
1397 if (max_nl_cursor == 0) 1499 if (max_nl_cursor == 0)
@@ -1413,7 +1515,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
1413 } 1515 }
1414 vma->vm_private_data = (void *) max_nl_cursor; 1516 vma->vm_private_data = (void *) max_nl_cursor;
1415 } 1517 }
1416 cond_resched_lock(&mapping->i_mmap_lock); 1518 cond_resched();
1417 max_nl_cursor += CLUSTER_SIZE; 1519 max_nl_cursor += CLUSTER_SIZE;
1418 } while (max_nl_cursor <= max_nl_size); 1520 } while (max_nl_cursor <= max_nl_size);
1419 1521
@@ -1425,7 +1527,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
1425 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) 1527 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
1426 vma->vm_private_data = NULL; 1528 vma->vm_private_data = NULL;
1427out: 1529out:
1428 spin_unlock(&mapping->i_mmap_lock); 1530 mutex_unlock(&mapping->i_mmap_mutex);
1429 return ret; 1531 return ret;
1430} 1532}
1431 1533
@@ -1544,7 +1646,7 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
1544 1646
1545 if (!mapping) 1647 if (!mapping)
1546 return ret; 1648 return ret;
1547 spin_lock(&mapping->i_mmap_lock); 1649 mutex_lock(&mapping->i_mmap_mutex);
1548 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { 1650 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
1549 unsigned long address = vma_address(page, vma); 1651 unsigned long address = vma_address(page, vma);
1550 if (address == -EFAULT) 1652 if (address == -EFAULT)
@@ -1558,7 +1660,7 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
1558 * never contain migration ptes. Decide what to do about this 1660 * never contain migration ptes. Decide what to do about this
1559 * limitation to linear when we need rmap_walk() on nonlinear. 1661 * limitation to linear when we need rmap_walk() on nonlinear.
1560 */ 1662 */
1561 spin_unlock(&mapping->i_mmap_lock); 1663 mutex_unlock(&mapping->i_mmap_mutex);
1562 return ret; 1664 return ret;
1563} 1665}
1564 1666
diff --git a/mm/shmem.c b/mm/shmem.c
index ba4ad28b7db6..69edb45a9f28 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -99,6 +99,13 @@ static struct vfsmount *shm_mnt;
99/* Pretend that each entry is of this size in directory's i_size */ 99/* Pretend that each entry is of this size in directory's i_size */
100#define BOGO_DIRENT_SIZE 20 100#define BOGO_DIRENT_SIZE 20
101 101
102struct shmem_xattr {
103 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
104 char *name; /* xattr name */
105 size_t size;
106 char value[0];
107};
108
102/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */ 109/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
103enum sgp_type { 110enum sgp_type {
104 SGP_READ, /* don't exceed i_size, don't allocate page */ 111 SGP_READ, /* don't exceed i_size, don't allocate page */
@@ -822,6 +829,7 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
822static void shmem_evict_inode(struct inode *inode) 829static void shmem_evict_inode(struct inode *inode)
823{ 830{
824 struct shmem_inode_info *info = SHMEM_I(inode); 831 struct shmem_inode_info *info = SHMEM_I(inode);
832 struct shmem_xattr *xattr, *nxattr;
825 833
826 if (inode->i_mapping->a_ops == &shmem_aops) { 834 if (inode->i_mapping->a_ops == &shmem_aops) {
827 truncate_inode_pages(inode->i_mapping, 0); 835 truncate_inode_pages(inode->i_mapping, 0);
@@ -834,6 +842,11 @@ static void shmem_evict_inode(struct inode *inode)
834 mutex_unlock(&shmem_swaplist_mutex); 842 mutex_unlock(&shmem_swaplist_mutex);
835 } 843 }
836 } 844 }
845
846 list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
847 kfree(xattr->name);
848 kfree(xattr);
849 }
837 BUG_ON(inode->i_blocks); 850 BUG_ON(inode->i_blocks);
838 shmem_free_inode(inode->i_sb); 851 shmem_free_inode(inode->i_sb);
839 end_writeback(inode); 852 end_writeback(inode);
@@ -1615,6 +1628,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
1615 spin_lock_init(&info->lock); 1628 spin_lock_init(&info->lock);
1616 info->flags = flags & VM_NORESERVE; 1629 info->flags = flags & VM_NORESERVE;
1617 INIT_LIST_HEAD(&info->swaplist); 1630 INIT_LIST_HEAD(&info->swaplist);
1631 INIT_LIST_HEAD(&info->xattr_list);
1618 cache_no_acl(inode); 1632 cache_no_acl(inode);
1619 1633
1620 switch (mode & S_IFMT) { 1634 switch (mode & S_IFMT) {
@@ -2014,9 +2028,9 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
2014 2028
2015 info = SHMEM_I(inode); 2029 info = SHMEM_I(inode);
2016 inode->i_size = len-1; 2030 inode->i_size = len-1;
2017 if (len <= (char *)inode - (char *)info) { 2031 if (len <= SHMEM_SYMLINK_INLINE_LEN) {
2018 /* do it inline */ 2032 /* do it inline */
2019 memcpy(info, symname, len); 2033 memcpy(info->inline_symlink, symname, len);
2020 inode->i_op = &shmem_symlink_inline_operations; 2034 inode->i_op = &shmem_symlink_inline_operations;
2021 } else { 2035 } else {
2022 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL); 2036 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
@@ -2042,7 +2056,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
2042 2056
2043static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd) 2057static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
2044{ 2058{
2045 nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode)); 2059 nd_set_link(nd, SHMEM_I(dentry->d_inode)->inline_symlink);
2046 return NULL; 2060 return NULL;
2047} 2061}
2048 2062
@@ -2066,63 +2080,253 @@ static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *co
2066 } 2080 }
2067} 2081}
2068 2082
2069static const struct inode_operations shmem_symlink_inline_operations = { 2083#ifdef CONFIG_TMPFS_XATTR
2070 .readlink = generic_readlink,
2071 .follow_link = shmem_follow_link_inline,
2072};
2073
2074static const struct inode_operations shmem_symlink_inode_operations = {
2075 .readlink = generic_readlink,
2076 .follow_link = shmem_follow_link,
2077 .put_link = shmem_put_link,
2078};
2079
2080#ifdef CONFIG_TMPFS_POSIX_ACL
2081/* 2084/*
2082 * Superblocks without xattr inode operations will get security.* xattr 2085 * Superblocks without xattr inode operations may get some security.* xattr
2083 * support from the VFS "for free". As soon as we have any other xattrs 2086 * support from the LSM "for free". As soon as we have any other xattrs
2084 * like ACLs, we also need to implement the security.* handlers at 2087 * like ACLs, we also need to implement the security.* handlers at
2085 * filesystem level, though. 2088 * filesystem level, though.
2086 */ 2089 */
2087 2090
2088static size_t shmem_xattr_security_list(struct dentry *dentry, char *list, 2091static int shmem_xattr_get(struct dentry *dentry, const char *name,
2089 size_t list_len, const char *name, 2092 void *buffer, size_t size)
2090 size_t name_len, int handler_flags)
2091{ 2093{
2092 return security_inode_listsecurity(dentry->d_inode, list, list_len); 2094 struct shmem_inode_info *info;
2093} 2095 struct shmem_xattr *xattr;
2096 int ret = -ENODATA;
2094 2097
2095static int shmem_xattr_security_get(struct dentry *dentry, const char *name, 2098 info = SHMEM_I(dentry->d_inode);
2096 void *buffer, size_t size, int handler_flags) 2099
2097{ 2100 spin_lock(&info->lock);
2098 if (strcmp(name, "") == 0) 2101 list_for_each_entry(xattr, &info->xattr_list, list) {
2099 return -EINVAL; 2102 if (strcmp(name, xattr->name))
2100 return xattr_getsecurity(dentry->d_inode, name, buffer, size); 2103 continue;
2104
2105 ret = xattr->size;
2106 if (buffer) {
2107 if (size < xattr->size)
2108 ret = -ERANGE;
2109 else
2110 memcpy(buffer, xattr->value, xattr->size);
2111 }
2112 break;
2113 }
2114 spin_unlock(&info->lock);
2115 return ret;
2101} 2116}
2102 2117
2103static int shmem_xattr_security_set(struct dentry *dentry, const char *name, 2118static int shmem_xattr_set(struct dentry *dentry, const char *name,
2104 const void *value, size_t size, int flags, int handler_flags) 2119 const void *value, size_t size, int flags)
2105{ 2120{
2106 if (strcmp(name, "") == 0) 2121 struct inode *inode = dentry->d_inode;
2107 return -EINVAL; 2122 struct shmem_inode_info *info = SHMEM_I(inode);
2108 return security_inode_setsecurity(dentry->d_inode, name, value, 2123 struct shmem_xattr *xattr;
2109 size, flags); 2124 struct shmem_xattr *new_xattr = NULL;
2125 size_t len;
2126 int err = 0;
2127
2128 /* value == NULL means remove */
2129 if (value) {
2130 /* wrap around? */
2131 len = sizeof(*new_xattr) + size;
2132 if (len <= sizeof(*new_xattr))
2133 return -ENOMEM;
2134
2135 new_xattr = kmalloc(len, GFP_KERNEL);
2136 if (!new_xattr)
2137 return -ENOMEM;
2138
2139 new_xattr->name = kstrdup(name, GFP_KERNEL);
2140 if (!new_xattr->name) {
2141 kfree(new_xattr);
2142 return -ENOMEM;
2143 }
2144
2145 new_xattr->size = size;
2146 memcpy(new_xattr->value, value, size);
2147 }
2148
2149 spin_lock(&info->lock);
2150 list_for_each_entry(xattr, &info->xattr_list, list) {
2151 if (!strcmp(name, xattr->name)) {
2152 if (flags & XATTR_CREATE) {
2153 xattr = new_xattr;
2154 err = -EEXIST;
2155 } else if (new_xattr) {
2156 list_replace(&xattr->list, &new_xattr->list);
2157 } else {
2158 list_del(&xattr->list);
2159 }
2160 goto out;
2161 }
2162 }
2163 if (flags & XATTR_REPLACE) {
2164 xattr = new_xattr;
2165 err = -ENODATA;
2166 } else {
2167 list_add(&new_xattr->list, &info->xattr_list);
2168 xattr = NULL;
2169 }
2170out:
2171 spin_unlock(&info->lock);
2172 if (xattr)
2173 kfree(xattr->name);
2174 kfree(xattr);
2175 return err;
2110} 2176}
2111 2177
2112static const struct xattr_handler shmem_xattr_security_handler = {
2113 .prefix = XATTR_SECURITY_PREFIX,
2114 .list = shmem_xattr_security_list,
2115 .get = shmem_xattr_security_get,
2116 .set = shmem_xattr_security_set,
2117};
2118 2178
2119static const struct xattr_handler *shmem_xattr_handlers[] = { 2179static const struct xattr_handler *shmem_xattr_handlers[] = {
2180#ifdef CONFIG_TMPFS_POSIX_ACL
2120 &generic_acl_access_handler, 2181 &generic_acl_access_handler,
2121 &generic_acl_default_handler, 2182 &generic_acl_default_handler,
2122 &shmem_xattr_security_handler, 2183#endif
2123 NULL 2184 NULL
2124}; 2185};
2186
2187static int shmem_xattr_validate(const char *name)
2188{
2189 struct { const char *prefix; size_t len; } arr[] = {
2190 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
2191 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
2192 };
2193 int i;
2194
2195 for (i = 0; i < ARRAY_SIZE(arr); i++) {
2196 size_t preflen = arr[i].len;
2197 if (strncmp(name, arr[i].prefix, preflen) == 0) {
2198 if (!name[preflen])
2199 return -EINVAL;
2200 return 0;
2201 }
2202 }
2203 return -EOPNOTSUPP;
2204}
2205
2206static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
2207 void *buffer, size_t size)
2208{
2209 int err;
2210
2211 /*
2212 * If this is a request for a synthetic attribute in the system.*
2213 * namespace use the generic infrastructure to resolve a handler
2214 * for it via sb->s_xattr.
2215 */
2216 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2217 return generic_getxattr(dentry, name, buffer, size);
2218
2219 err = shmem_xattr_validate(name);
2220 if (err)
2221 return err;
2222
2223 return shmem_xattr_get(dentry, name, buffer, size);
2224}
2225
2226static int shmem_setxattr(struct dentry *dentry, const char *name,
2227 const void *value, size_t size, int flags)
2228{
2229 int err;
2230
2231 /*
2232 * If this is a request for a synthetic attribute in the system.*
2233 * namespace use the generic infrastructure to resolve a handler
2234 * for it via sb->s_xattr.
2235 */
2236 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2237 return generic_setxattr(dentry, name, value, size, flags);
2238
2239 err = shmem_xattr_validate(name);
2240 if (err)
2241 return err;
2242
2243 if (size == 0)
2244 value = ""; /* empty EA, do not remove */
2245
2246 return shmem_xattr_set(dentry, name, value, size, flags);
2247
2248}
2249
2250static int shmem_removexattr(struct dentry *dentry, const char *name)
2251{
2252 int err;
2253
2254 /*
2255 * If this is a request for a synthetic attribute in the system.*
2256 * namespace use the generic infrastructure to resolve a handler
2257 * for it via sb->s_xattr.
2258 */
2259 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2260 return generic_removexattr(dentry, name);
2261
2262 err = shmem_xattr_validate(name);
2263 if (err)
2264 return err;
2265
2266 return shmem_xattr_set(dentry, name, NULL, 0, XATTR_REPLACE);
2267}
2268
2269static bool xattr_is_trusted(const char *name)
2270{
2271 return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
2272}
2273
2274static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
2275{
2276 bool trusted = capable(CAP_SYS_ADMIN);
2277 struct shmem_xattr *xattr;
2278 struct shmem_inode_info *info;
2279 size_t used = 0;
2280
2281 info = SHMEM_I(dentry->d_inode);
2282
2283 spin_lock(&info->lock);
2284 list_for_each_entry(xattr, &info->xattr_list, list) {
2285 size_t len;
2286
2287 /* skip "trusted." attributes for unprivileged callers */
2288 if (!trusted && xattr_is_trusted(xattr->name))
2289 continue;
2290
2291 len = strlen(xattr->name) + 1;
2292 used += len;
2293 if (buffer) {
2294 if (size < used) {
2295 used = -ERANGE;
2296 break;
2297 }
2298 memcpy(buffer, xattr->name, len);
2299 buffer += len;
2300 }
2301 }
2302 spin_unlock(&info->lock);
2303
2304 return used;
2305}
2306#endif /* CONFIG_TMPFS_XATTR */
2307
2308static const struct inode_operations shmem_symlink_inline_operations = {
2309 .readlink = generic_readlink,
2310 .follow_link = shmem_follow_link_inline,
2311#ifdef CONFIG_TMPFS_XATTR
2312 .setxattr = shmem_setxattr,
2313 .getxattr = shmem_getxattr,
2314 .listxattr = shmem_listxattr,
2315 .removexattr = shmem_removexattr,
2316#endif
2317};
2318
2319static const struct inode_operations shmem_symlink_inode_operations = {
2320 .readlink = generic_readlink,
2321 .follow_link = shmem_follow_link,
2322 .put_link = shmem_put_link,
2323#ifdef CONFIG_TMPFS_XATTR
2324 .setxattr = shmem_setxattr,
2325 .getxattr = shmem_getxattr,
2326 .listxattr = shmem_listxattr,
2327 .removexattr = shmem_removexattr,
2125#endif 2328#endif
2329};
2126 2330
2127static struct dentry *shmem_get_parent(struct dentry *child) 2331static struct dentry *shmem_get_parent(struct dentry *child)
2128{ 2332{
@@ -2402,8 +2606,10 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
2402 sb->s_magic = TMPFS_MAGIC; 2606 sb->s_magic = TMPFS_MAGIC;
2403 sb->s_op = &shmem_ops; 2607 sb->s_op = &shmem_ops;
2404 sb->s_time_gran = 1; 2608 sb->s_time_gran = 1;
2405#ifdef CONFIG_TMPFS_POSIX_ACL 2609#ifdef CONFIG_TMPFS_XATTR
2406 sb->s_xattr = shmem_xattr_handlers; 2610 sb->s_xattr = shmem_xattr_handlers;
2611#endif
2612#ifdef CONFIG_TMPFS_POSIX_ACL
2407 sb->s_flags |= MS_POSIXACL; 2613 sb->s_flags |= MS_POSIXACL;
2408#endif 2614#endif
2409 2615
@@ -2501,11 +2707,13 @@ static const struct file_operations shmem_file_operations = {
2501static const struct inode_operations shmem_inode_operations = { 2707static const struct inode_operations shmem_inode_operations = {
2502 .setattr = shmem_notify_change, 2708 .setattr = shmem_notify_change,
2503 .truncate_range = shmem_truncate_range, 2709 .truncate_range = shmem_truncate_range,
2710#ifdef CONFIG_TMPFS_XATTR
2711 .setxattr = shmem_setxattr,
2712 .getxattr = shmem_getxattr,
2713 .listxattr = shmem_listxattr,
2714 .removexattr = shmem_removexattr,
2715#endif
2504#ifdef CONFIG_TMPFS_POSIX_ACL 2716#ifdef CONFIG_TMPFS_POSIX_ACL
2505 .setxattr = generic_setxattr,
2506 .getxattr = generic_getxattr,
2507 .listxattr = generic_listxattr,
2508 .removexattr = generic_removexattr,
2509 .check_acl = generic_check_acl, 2717 .check_acl = generic_check_acl,
2510#endif 2718#endif
2511 2719
@@ -2523,23 +2731,27 @@ static const struct inode_operations shmem_dir_inode_operations = {
2523 .mknod = shmem_mknod, 2731 .mknod = shmem_mknod,
2524 .rename = shmem_rename, 2732 .rename = shmem_rename,
2525#endif 2733#endif
2734#ifdef CONFIG_TMPFS_XATTR
2735 .setxattr = shmem_setxattr,
2736 .getxattr = shmem_getxattr,
2737 .listxattr = shmem_listxattr,
2738 .removexattr = shmem_removexattr,
2739#endif
2526#ifdef CONFIG_TMPFS_POSIX_ACL 2740#ifdef CONFIG_TMPFS_POSIX_ACL
2527 .setattr = shmem_notify_change, 2741 .setattr = shmem_notify_change,
2528 .setxattr = generic_setxattr,
2529 .getxattr = generic_getxattr,
2530 .listxattr = generic_listxattr,
2531 .removexattr = generic_removexattr,
2532 .check_acl = generic_check_acl, 2742 .check_acl = generic_check_acl,
2533#endif 2743#endif
2534}; 2744};
2535 2745
2536static const struct inode_operations shmem_special_inode_operations = { 2746static const struct inode_operations shmem_special_inode_operations = {
2747#ifdef CONFIG_TMPFS_XATTR
2748 .setxattr = shmem_setxattr,
2749 .getxattr = shmem_getxattr,
2750 .listxattr = shmem_listxattr,
2751 .removexattr = shmem_removexattr,
2752#endif
2537#ifdef CONFIG_TMPFS_POSIX_ACL 2753#ifdef CONFIG_TMPFS_POSIX_ACL
2538 .setattr = shmem_notify_change, 2754 .setattr = shmem_notify_change,
2539 .setxattr = generic_setxattr,
2540 .getxattr = generic_getxattr,
2541 .listxattr = generic_listxattr,
2542 .removexattr = generic_removexattr,
2543 .check_acl = generic_check_acl, 2755 .check_acl = generic_check_acl,
2544#endif 2756#endif
2545}; 2757};
diff --git a/mm/slub.c b/mm/slub.c
index 4ea7f1a22a94..4aad32d2e60d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1884,7 +1884,8 @@ debug:
1884 deactivate_slab(s, c); 1884 deactivate_slab(s, c);
1885 c->page = NULL; 1885 c->page = NULL;
1886 c->node = NUMA_NO_NODE; 1886 c->node = NUMA_NO_NODE;
1887 goto unlock_out; 1887 local_irq_restore(flags);
1888 return object;
1888} 1889}
1889 1890
1890/* 1891/*
diff --git a/mm/swap.c b/mm/swap.c
index 5602f1a1b1e7..3a442f18b0b3 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -272,14 +272,10 @@ static void update_page_reclaim_stat(struct zone *zone, struct page *page,
272 memcg_reclaim_stat->recent_rotated[file]++; 272 memcg_reclaim_stat->recent_rotated[file]++;
273} 273}
274 274
275/* 275static void __activate_page(struct page *page, void *arg)
276 * FIXME: speed this up?
277 */
278void activate_page(struct page *page)
279{ 276{
280 struct zone *zone = page_zone(page); 277 struct zone *zone = page_zone(page);
281 278
282 spin_lock_irq(&zone->lru_lock);
283 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { 279 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
284 int file = page_is_file_cache(page); 280 int file = page_is_file_cache(page);
285 int lru = page_lru_base_type(page); 281 int lru = page_lru_base_type(page);
@@ -292,8 +288,45 @@ void activate_page(struct page *page)
292 288
293 update_page_reclaim_stat(zone, page, file, 1); 289 update_page_reclaim_stat(zone, page, file, 1);
294 } 290 }
291}
292
293#ifdef CONFIG_SMP
294static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
295
296static void activate_page_drain(int cpu)
297{
298 struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
299
300 if (pagevec_count(pvec))
301 pagevec_lru_move_fn(pvec, __activate_page, NULL);
302}
303
304void activate_page(struct page *page)
305{
306 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
307 struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
308
309 page_cache_get(page);
310 if (!pagevec_add(pvec, page))
311 pagevec_lru_move_fn(pvec, __activate_page, NULL);
312 put_cpu_var(activate_page_pvecs);
313 }
314}
315
316#else
317static inline void activate_page_drain(int cpu)
318{
319}
320
321void activate_page(struct page *page)
322{
323 struct zone *zone = page_zone(page);
324
325 spin_lock_irq(&zone->lru_lock);
326 __activate_page(page, NULL);
295 spin_unlock_irq(&zone->lru_lock); 327 spin_unlock_irq(&zone->lru_lock);
296} 328}
329#endif
297 330
298/* 331/*
299 * Mark a page as having seen activity. 332 * Mark a page as having seen activity.
@@ -464,6 +497,8 @@ static void drain_cpu_pagevecs(int cpu)
464 pvec = &per_cpu(lru_deactivate_pvecs, cpu); 497 pvec = &per_cpu(lru_deactivate_pvecs, cpu);
465 if (pagevec_count(pvec)) 498 if (pagevec_count(pvec))
466 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); 499 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
500
501 activate_page_drain(cpu);
467} 502}
468 503
469/** 504/**
@@ -476,6 +511,13 @@ static void drain_cpu_pagevecs(int cpu)
476 */ 511 */
477void deactivate_page(struct page *page) 512void deactivate_page(struct page *page)
478{ 513{
514 /*
515 * In a workload with many unevictable page such as mprotect, unevictable
516 * page deactivation for accelerating reclaim is pointless.
517 */
518 if (PageUnevictable(page))
519 return;
520
479 if (likely(get_page_unless_zero(page))) { 521 if (likely(get_page_unless_zero(page))) {
480 struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs); 522 struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
481 523
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 8c6b3ce38f09..d537d29e9b7b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -31,6 +31,7 @@
31#include <linux/syscalls.h> 31#include <linux/syscalls.h>
32#include <linux/memcontrol.h> 32#include <linux/memcontrol.h>
33#include <linux/poll.h> 33#include <linux/poll.h>
34#include <linux/oom.h>
34 35
35#include <asm/pgtable.h> 36#include <asm/pgtable.h>
36#include <asm/tlbflush.h> 37#include <asm/tlbflush.h>
@@ -1555,6 +1556,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1555 struct address_space *mapping; 1556 struct address_space *mapping;
1556 struct inode *inode; 1557 struct inode *inode;
1557 char *pathname; 1558 char *pathname;
1559 int oom_score_adj;
1558 int i, type, prev; 1560 int i, type, prev;
1559 int err; 1561 int err;
1560 1562
@@ -1613,9 +1615,9 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1613 p->flags &= ~SWP_WRITEOK; 1615 p->flags &= ~SWP_WRITEOK;
1614 spin_unlock(&swap_lock); 1616 spin_unlock(&swap_lock);
1615 1617
1616 current->flags |= PF_OOM_ORIGIN; 1618 oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
1617 err = try_to_unuse(type); 1619 err = try_to_unuse(type);
1618 current->flags &= ~PF_OOM_ORIGIN; 1620 test_set_oom_score_adj(oom_score_adj);
1619 1621
1620 if (err) { 1622 if (err) {
1621 /* 1623 /*
diff --git a/mm/util.c b/mm/util.c
index e7b103a6fd21..88ea1bd661c0 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -6,6 +6,8 @@
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <asm/uaccess.h> 7#include <asm/uaccess.h>
8 8
9#include "internal.h"
10
9#define CREATE_TRACE_POINTS 11#define CREATE_TRACE_POINTS
10#include <trace/events/kmem.h> 12#include <trace/events/kmem.h>
11 13
@@ -215,6 +217,28 @@ char *strndup_user(const char __user *s, long n)
215} 217}
216EXPORT_SYMBOL(strndup_user); 218EXPORT_SYMBOL(strndup_user);
217 219
220void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
221 struct vm_area_struct *prev, struct rb_node *rb_parent)
222{
223 struct vm_area_struct *next;
224
225 vma->vm_prev = prev;
226 if (prev) {
227 next = prev->vm_next;
228 prev->vm_next = vma;
229 } else {
230 mm->mmap = vma;
231 if (rb_parent)
232 next = rb_entry(rb_parent,
233 struct vm_area_struct, vm_rb);
234 else
235 next = NULL;
236 }
237 vma->vm_next = next;
238 if (next)
239 next->vm_prev = vma;
240}
241
218#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT) 242#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
219void arch_pick_mmap_layout(struct mm_struct *mm) 243void arch_pick_mmap_layout(struct mm_struct *mm)
220{ 244{
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 5d6030235d7a..b5ccf3158d82 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -375,7 +375,7 @@ nocache:
375 /* find starting point for our search */ 375 /* find starting point for our search */
376 if (free_vmap_cache) { 376 if (free_vmap_cache) {
377 first = rb_entry(free_vmap_cache, struct vmap_area, rb_node); 377 first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
378 addr = ALIGN(first->va_end + PAGE_SIZE, align); 378 addr = ALIGN(first->va_end, align);
379 if (addr < vstart) 379 if (addr < vstart)
380 goto nocache; 380 goto nocache;
381 if (addr + size - 1 < addr) 381 if (addr + size - 1 < addr)
@@ -406,10 +406,10 @@ nocache:
406 } 406 }
407 407
408 /* from the starting point, walk areas until a suitable hole is found */ 408 /* from the starting point, walk areas until a suitable hole is found */
409 while (addr + size >= first->va_start && addr + size <= vend) { 409 while (addr + size > first->va_start && addr + size <= vend) {
410 if (addr + cached_hole_size < first->va_start) 410 if (addr + cached_hole_size < first->va_start)
411 cached_hole_size = first->va_start - addr; 411 cached_hole_size = first->va_start - addr;
412 addr = ALIGN(first->va_end + PAGE_SIZE, align); 412 addr = ALIGN(first->va_end, align);
413 if (addr + size - 1 < addr) 413 if (addr + size - 1 < addr)
414 goto overflow; 414 goto overflow;
415 415
@@ -1534,6 +1534,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
1534static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 1534static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1535 pgprot_t prot, int node, void *caller) 1535 pgprot_t prot, int node, void *caller)
1536{ 1536{
1537 const int order = 0;
1537 struct page **pages; 1538 struct page **pages;
1538 unsigned int nr_pages, array_size, i; 1539 unsigned int nr_pages, array_size, i;
1539 gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 1540 gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
@@ -1560,11 +1561,12 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1560 1561
1561 for (i = 0; i < area->nr_pages; i++) { 1562 for (i = 0; i < area->nr_pages; i++) {
1562 struct page *page; 1563 struct page *page;
1564 gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;
1563 1565
1564 if (node < 0) 1566 if (node < 0)
1565 page = alloc_page(gfp_mask); 1567 page = alloc_page(tmp_mask);
1566 else 1568 else
1567 page = alloc_pages_node(node, gfp_mask, 0); 1569 page = alloc_pages_node(node, tmp_mask, order);
1568 1570
1569 if (unlikely(!page)) { 1571 if (unlikely(!page)) {
1570 /* Successfully allocated i pages, free them in __vunmap() */ 1572 /* Successfully allocated i pages, free them in __vunmap() */
@@ -1579,6 +1581,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1579 return area->addr; 1581 return area->addr;
1580 1582
1581fail: 1583fail:
1584 warn_alloc_failed(gfp_mask, order, "vmalloc: allocation failure, "
1585 "allocated %ld of %ld bytes\n",
1586 (area->nr_pages*PAGE_SIZE), area->size);
1582 vfree(area->addr); 1587 vfree(area->addr);
1583 return NULL; 1588 return NULL;
1584} 1589}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c9177202c8ce..7e0116150dc7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -202,6 +202,14 @@ void unregister_shrinker(struct shrinker *shrinker)
202} 202}
203EXPORT_SYMBOL(unregister_shrinker); 203EXPORT_SYMBOL(unregister_shrinker);
204 204
205static inline int do_shrinker_shrink(struct shrinker *shrinker,
206 struct shrink_control *sc,
207 unsigned long nr_to_scan)
208{
209 sc->nr_to_scan = nr_to_scan;
210 return (*shrinker->shrink)(shrinker, sc);
211}
212
205#define SHRINK_BATCH 128 213#define SHRINK_BATCH 128
206/* 214/*
207 * Call the shrink functions to age shrinkable caches 215 * Call the shrink functions to age shrinkable caches
@@ -222,25 +230,29 @@ EXPORT_SYMBOL(unregister_shrinker);
222 * 230 *
223 * Returns the number of slab objects which we shrunk. 231 * Returns the number of slab objects which we shrunk.
224 */ 232 */
225unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, 233unsigned long shrink_slab(struct shrink_control *shrink,
226 unsigned long lru_pages) 234 unsigned long nr_pages_scanned,
235 unsigned long lru_pages)
227{ 236{
228 struct shrinker *shrinker; 237 struct shrinker *shrinker;
229 unsigned long ret = 0; 238 unsigned long ret = 0;
230 239
231 if (scanned == 0) 240 if (nr_pages_scanned == 0)
232 scanned = SWAP_CLUSTER_MAX; 241 nr_pages_scanned = SWAP_CLUSTER_MAX;
233 242
234 if (!down_read_trylock(&shrinker_rwsem)) 243 if (!down_read_trylock(&shrinker_rwsem)) {
235 return 1; /* Assume we'll be able to shrink next time */ 244 /* Assume we'll be able to shrink next time */
245 ret = 1;
246 goto out;
247 }
236 248
237 list_for_each_entry(shrinker, &shrinker_list, list) { 249 list_for_each_entry(shrinker, &shrinker_list, list) {
238 unsigned long long delta; 250 unsigned long long delta;
239 unsigned long total_scan; 251 unsigned long total_scan;
240 unsigned long max_pass; 252 unsigned long max_pass;
241 253
242 max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask); 254 max_pass = do_shrinker_shrink(shrinker, shrink, 0);
243 delta = (4 * scanned) / shrinker->seeks; 255 delta = (4 * nr_pages_scanned) / shrinker->seeks;
244 delta *= max_pass; 256 delta *= max_pass;
245 do_div(delta, lru_pages + 1); 257 do_div(delta, lru_pages + 1);
246 shrinker->nr += delta; 258 shrinker->nr += delta;
@@ -267,9 +279,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
267 int shrink_ret; 279 int shrink_ret;
268 int nr_before; 280 int nr_before;
269 281
270 nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask); 282 nr_before = do_shrinker_shrink(shrinker, shrink, 0);
271 shrink_ret = (*shrinker->shrink)(shrinker, this_scan, 283 shrink_ret = do_shrinker_shrink(shrinker, shrink,
272 gfp_mask); 284 this_scan);
273 if (shrink_ret == -1) 285 if (shrink_ret == -1)
274 break; 286 break;
275 if (shrink_ret < nr_before) 287 if (shrink_ret < nr_before)
@@ -283,6 +295,8 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
283 shrinker->nr += total_scan; 295 shrinker->nr += total_scan;
284 } 296 }
285 up_read(&shrinker_rwsem); 297 up_read(&shrinker_rwsem);
298out:
299 cond_resched();
286 return ret; 300 return ret;
287} 301}
288 302
@@ -1202,13 +1216,16 @@ int isolate_lru_page(struct page *page)
1202{ 1216{
1203 int ret = -EBUSY; 1217 int ret = -EBUSY;
1204 1218
1219 VM_BUG_ON(!page_count(page));
1220
1205 if (PageLRU(page)) { 1221 if (PageLRU(page)) {
1206 struct zone *zone = page_zone(page); 1222 struct zone *zone = page_zone(page);
1207 1223
1208 spin_lock_irq(&zone->lru_lock); 1224 spin_lock_irq(&zone->lru_lock);
1209 if (PageLRU(page) && get_page_unless_zero(page)) { 1225 if (PageLRU(page)) {
1210 int lru = page_lru(page); 1226 int lru = page_lru(page);
1211 ret = 0; 1227 ret = 0;
1228 get_page(page);
1212 ClearPageLRU(page); 1229 ClearPageLRU(page);
1213 1230
1214 del_page_from_lru_list(zone, page, lru); 1231 del_page_from_lru_list(zone, page, lru);
@@ -2027,7 +2044,8 @@ static bool all_unreclaimable(struct zonelist *zonelist,
2027 * else, the number of pages reclaimed 2044 * else, the number of pages reclaimed
2028 */ 2045 */
2029static unsigned long do_try_to_free_pages(struct zonelist *zonelist, 2046static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2030 struct scan_control *sc) 2047 struct scan_control *sc,
2048 struct shrink_control *shrink)
2031{ 2049{
2032 int priority; 2050 int priority;
2033 unsigned long total_scanned = 0; 2051 unsigned long total_scanned = 0;
@@ -2061,7 +2079,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2061 lru_pages += zone_reclaimable_pages(zone); 2079 lru_pages += zone_reclaimable_pages(zone);
2062 } 2080 }
2063 2081
2064 shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages); 2082 shrink_slab(shrink, sc->nr_scanned, lru_pages);
2065 if (reclaim_state) { 2083 if (reclaim_state) {
2066 sc->nr_reclaimed += reclaim_state->reclaimed_slab; 2084 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2067 reclaim_state->reclaimed_slab = 0; 2085 reclaim_state->reclaimed_slab = 0;
@@ -2133,12 +2151,15 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
2133 .mem_cgroup = NULL, 2151 .mem_cgroup = NULL,
2134 .nodemask = nodemask, 2152 .nodemask = nodemask,
2135 }; 2153 };
2154 struct shrink_control shrink = {
2155 .gfp_mask = sc.gfp_mask,
2156 };
2136 2157
2137 trace_mm_vmscan_direct_reclaim_begin(order, 2158 trace_mm_vmscan_direct_reclaim_begin(order,
2138 sc.may_writepage, 2159 sc.may_writepage,
2139 gfp_mask); 2160 gfp_mask);
2140 2161
2141 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 2162 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
2142 2163
2143 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); 2164 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
2144 2165
@@ -2198,17 +2219,20 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
2198 .order = 0, 2219 .order = 0,
2199 .mem_cgroup = mem_cont, 2220 .mem_cgroup = mem_cont,
2200 .nodemask = NULL, /* we don't care the placement */ 2221 .nodemask = NULL, /* we don't care the placement */
2222 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2223 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
2224 };
2225 struct shrink_control shrink = {
2226 .gfp_mask = sc.gfp_mask,
2201 }; 2227 };
2202 2228
2203 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2204 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
2205 zonelist = NODE_DATA(numa_node_id())->node_zonelists; 2229 zonelist = NODE_DATA(numa_node_id())->node_zonelists;
2206 2230
2207 trace_mm_vmscan_memcg_reclaim_begin(0, 2231 trace_mm_vmscan_memcg_reclaim_begin(0,
2208 sc.may_writepage, 2232 sc.may_writepage,
2209 sc.gfp_mask); 2233 sc.gfp_mask);
2210 2234
2211 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 2235 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
2212 2236
2213 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); 2237 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
2214 2238
@@ -2287,7 +2311,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
2287 * must be balanced 2311 * must be balanced
2288 */ 2312 */
2289 if (order) 2313 if (order)
2290 return pgdat_balanced(pgdat, balanced, classzone_idx); 2314 return !pgdat_balanced(pgdat, balanced, classzone_idx);
2291 else 2315 else
2292 return !all_zones_ok; 2316 return !all_zones_ok;
2293} 2317}
@@ -2336,6 +2360,9 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
2336 .order = order, 2360 .order = order,
2337 .mem_cgroup = NULL, 2361 .mem_cgroup = NULL,
2338 }; 2362 };
2363 struct shrink_control shrink = {
2364 .gfp_mask = sc.gfp_mask,
2365 };
2339loop_again: 2366loop_again:
2340 total_scanned = 0; 2367 total_scanned = 0;
2341 sc.nr_reclaimed = 0; 2368 sc.nr_reclaimed = 0;
@@ -2435,8 +2462,7 @@ loop_again:
2435 end_zone, 0)) 2462 end_zone, 0))
2436 shrink_zone(priority, zone, &sc); 2463 shrink_zone(priority, zone, &sc);
2437 reclaim_state->reclaimed_slab = 0; 2464 reclaim_state->reclaimed_slab = 0;
2438 nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, 2465 nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
2439 lru_pages);
2440 sc.nr_reclaimed += reclaim_state->reclaimed_slab; 2466 sc.nr_reclaimed += reclaim_state->reclaimed_slab;
2441 total_scanned += sc.nr_scanned; 2467 total_scanned += sc.nr_scanned;
2442 2468
@@ -2788,7 +2814,10 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
2788 .swappiness = vm_swappiness, 2814 .swappiness = vm_swappiness,
2789 .order = 0, 2815 .order = 0,
2790 }; 2816 };
2791 struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); 2817 struct shrink_control shrink = {
2818 .gfp_mask = sc.gfp_mask,
2819 };
2820 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
2792 struct task_struct *p = current; 2821 struct task_struct *p = current;
2793 unsigned long nr_reclaimed; 2822 unsigned long nr_reclaimed;
2794 2823
@@ -2797,7 +2826,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
2797 reclaim_state.reclaimed_slab = 0; 2826 reclaim_state.reclaimed_slab = 0;
2798 p->reclaim_state = &reclaim_state; 2827 p->reclaim_state = &reclaim_state;
2799 2828
2800 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); 2829 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
2801 2830
2802 p->reclaim_state = NULL; 2831 p->reclaim_state = NULL;
2803 lockdep_clear_current_reclaim_state(); 2832 lockdep_clear_current_reclaim_state();
@@ -2972,6 +3001,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2972 .swappiness = vm_swappiness, 3001 .swappiness = vm_swappiness,
2973 .order = order, 3002 .order = order,
2974 }; 3003 };
3004 struct shrink_control shrink = {
3005 .gfp_mask = sc.gfp_mask,
3006 };
2975 unsigned long nr_slab_pages0, nr_slab_pages1; 3007 unsigned long nr_slab_pages0, nr_slab_pages1;
2976 3008
2977 cond_resched(); 3009 cond_resched();
@@ -3013,7 +3045,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3013 unsigned long lru_pages = zone_reclaimable_pages(zone); 3045 unsigned long lru_pages = zone_reclaimable_pages(zone);
3014 3046
3015 /* No reclaimable slab or very low memory pressure */ 3047 /* No reclaimable slab or very low memory pressure */
3016 if (!shrink_slab(sc.nr_scanned, gfp_mask, lru_pages)) 3048 if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
3017 break; 3049 break;
3018 3050
3019 /* Freed enough memory */ 3051 /* Freed enough memory */
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 897ea9e88238..20c18b7694b2 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -157,7 +157,7 @@ int calculate_normal_threshold(struct zone *zone)
157/* 157/*
158 * Refresh the thresholds for each zone. 158 * Refresh the thresholds for each zone.
159 */ 159 */
160static void refresh_zone_stat_thresholds(void) 160void refresh_zone_stat_thresholds(void)
161{ 161{
162 struct zone *zone; 162 struct zone *zone;
163 int cpu; 163 int cpu;
@@ -659,6 +659,138 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
659} 659}
660#endif 660#endif
661 661
662#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS)
663#ifdef CONFIG_ZONE_DMA
664#define TEXT_FOR_DMA(xx) xx "_dma",
665#else
666#define TEXT_FOR_DMA(xx)
667#endif
668
669#ifdef CONFIG_ZONE_DMA32
670#define TEXT_FOR_DMA32(xx) xx "_dma32",
671#else
672#define TEXT_FOR_DMA32(xx)
673#endif
674
675#ifdef CONFIG_HIGHMEM
676#define TEXT_FOR_HIGHMEM(xx) xx "_high",
677#else
678#define TEXT_FOR_HIGHMEM(xx)
679#endif
680
681#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
682 TEXT_FOR_HIGHMEM(xx) xx "_movable",
683
684const char * const vmstat_text[] = {
685 /* Zoned VM counters */
686 "nr_free_pages",
687 "nr_inactive_anon",
688 "nr_active_anon",
689 "nr_inactive_file",
690 "nr_active_file",
691 "nr_unevictable",
692 "nr_mlock",
693 "nr_anon_pages",
694 "nr_mapped",
695 "nr_file_pages",
696 "nr_dirty",
697 "nr_writeback",
698 "nr_slab_reclaimable",
699 "nr_slab_unreclaimable",
700 "nr_page_table_pages",
701 "nr_kernel_stack",
702 "nr_unstable",
703 "nr_bounce",
704 "nr_vmscan_write",
705 "nr_writeback_temp",
706 "nr_isolated_anon",
707 "nr_isolated_file",
708 "nr_shmem",
709 "nr_dirtied",
710 "nr_written",
711
712#ifdef CONFIG_NUMA
713 "numa_hit",
714 "numa_miss",
715 "numa_foreign",
716 "numa_interleave",
717 "numa_local",
718 "numa_other",
719#endif
720 "nr_anon_transparent_hugepages",
721 "nr_dirty_threshold",
722 "nr_dirty_background_threshold",
723
724#ifdef CONFIG_VM_EVENT_COUNTERS
725 "pgpgin",
726 "pgpgout",
727 "pswpin",
728 "pswpout",
729
730 TEXTS_FOR_ZONES("pgalloc")
731
732 "pgfree",
733 "pgactivate",
734 "pgdeactivate",
735
736 "pgfault",
737 "pgmajfault",
738
739 TEXTS_FOR_ZONES("pgrefill")
740 TEXTS_FOR_ZONES("pgsteal")
741 TEXTS_FOR_ZONES("pgscan_kswapd")
742 TEXTS_FOR_ZONES("pgscan_direct")
743
744#ifdef CONFIG_NUMA
745 "zone_reclaim_failed",
746#endif
747 "pginodesteal",
748 "slabs_scanned",
749 "kswapd_steal",
750 "kswapd_inodesteal",
751 "kswapd_low_wmark_hit_quickly",
752 "kswapd_high_wmark_hit_quickly",
753 "kswapd_skip_congestion_wait",
754 "pageoutrun",
755 "allocstall",
756
757 "pgrotated",
758
759#ifdef CONFIG_COMPACTION
760 "compact_blocks_moved",
761 "compact_pages_moved",
762 "compact_pagemigrate_failed",
763 "compact_stall",
764 "compact_fail",
765 "compact_success",
766#endif
767
768#ifdef CONFIG_HUGETLB_PAGE
769 "htlb_buddy_alloc_success",
770 "htlb_buddy_alloc_fail",
771#endif
772 "unevictable_pgs_culled",
773 "unevictable_pgs_scanned",
774 "unevictable_pgs_rescued",
775 "unevictable_pgs_mlocked",
776 "unevictable_pgs_munlocked",
777 "unevictable_pgs_cleared",
778 "unevictable_pgs_stranded",
779 "unevictable_pgs_mlockfreed",
780
781#ifdef CONFIG_TRANSPARENT_HUGEPAGE
782 "thp_fault_alloc",
783 "thp_fault_fallback",
784 "thp_collapse_alloc",
785 "thp_collapse_alloc_failed",
786 "thp_split",
787#endif
788
789#endif /* CONFIG_VM_EVENTS_COUNTERS */
790};
791#endif /* CONFIG_PROC_FS || CONFIG_SYSFS */
792
793
662#ifdef CONFIG_PROC_FS 794#ifdef CONFIG_PROC_FS
663static void frag_show_print(struct seq_file *m, pg_data_t *pgdat, 795static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
664 struct zone *zone) 796 struct zone *zone)
@@ -831,135 +963,6 @@ static const struct file_operations pagetypeinfo_file_ops = {
831 .release = seq_release, 963 .release = seq_release,
832}; 964};
833 965
834#ifdef CONFIG_ZONE_DMA
835#define TEXT_FOR_DMA(xx) xx "_dma",
836#else
837#define TEXT_FOR_DMA(xx)
838#endif
839
840#ifdef CONFIG_ZONE_DMA32
841#define TEXT_FOR_DMA32(xx) xx "_dma32",
842#else
843#define TEXT_FOR_DMA32(xx)
844#endif
845
846#ifdef CONFIG_HIGHMEM
847#define TEXT_FOR_HIGHMEM(xx) xx "_high",
848#else
849#define TEXT_FOR_HIGHMEM(xx)
850#endif
851
852#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
853 TEXT_FOR_HIGHMEM(xx) xx "_movable",
854
855static const char * const vmstat_text[] = {
856 /* Zoned VM counters */
857 "nr_free_pages",
858 "nr_inactive_anon",
859 "nr_active_anon",
860 "nr_inactive_file",
861 "nr_active_file",
862 "nr_unevictable",
863 "nr_mlock",
864 "nr_anon_pages",
865 "nr_mapped",
866 "nr_file_pages",
867 "nr_dirty",
868 "nr_writeback",
869 "nr_slab_reclaimable",
870 "nr_slab_unreclaimable",
871 "nr_page_table_pages",
872 "nr_kernel_stack",
873 "nr_unstable",
874 "nr_bounce",
875 "nr_vmscan_write",
876 "nr_writeback_temp",
877 "nr_isolated_anon",
878 "nr_isolated_file",
879 "nr_shmem",
880 "nr_dirtied",
881 "nr_written",
882
883#ifdef CONFIG_NUMA
884 "numa_hit",
885 "numa_miss",
886 "numa_foreign",
887 "numa_interleave",
888 "numa_local",
889 "numa_other",
890#endif
891 "nr_anon_transparent_hugepages",
892 "nr_dirty_threshold",
893 "nr_dirty_background_threshold",
894
895#ifdef CONFIG_VM_EVENT_COUNTERS
896 "pgpgin",
897 "pgpgout",
898 "pswpin",
899 "pswpout",
900
901 TEXTS_FOR_ZONES("pgalloc")
902
903 "pgfree",
904 "pgactivate",
905 "pgdeactivate",
906
907 "pgfault",
908 "pgmajfault",
909
910 TEXTS_FOR_ZONES("pgrefill")
911 TEXTS_FOR_ZONES("pgsteal")
912 TEXTS_FOR_ZONES("pgscan_kswapd")
913 TEXTS_FOR_ZONES("pgscan_direct")
914
915#ifdef CONFIG_NUMA
916 "zone_reclaim_failed",
917#endif
918 "pginodesteal",
919 "slabs_scanned",
920 "kswapd_steal",
921 "kswapd_inodesteal",
922 "kswapd_low_wmark_hit_quickly",
923 "kswapd_high_wmark_hit_quickly",
924 "kswapd_skip_congestion_wait",
925 "pageoutrun",
926 "allocstall",
927
928 "pgrotated",
929
930#ifdef CONFIG_COMPACTION
931 "compact_blocks_moved",
932 "compact_pages_moved",
933 "compact_pagemigrate_failed",
934 "compact_stall",
935 "compact_fail",
936 "compact_success",
937#endif
938
939#ifdef CONFIG_HUGETLB_PAGE
940 "htlb_buddy_alloc_success",
941 "htlb_buddy_alloc_fail",
942#endif
943 "unevictable_pgs_culled",
944 "unevictable_pgs_scanned",
945 "unevictable_pgs_rescued",
946 "unevictable_pgs_mlocked",
947 "unevictable_pgs_munlocked",
948 "unevictable_pgs_cleared",
949 "unevictable_pgs_stranded",
950 "unevictable_pgs_mlockfreed",
951
952#ifdef CONFIG_TRANSPARENT_HUGEPAGE
953 "thp_fault_alloc",
954 "thp_fault_fallback",
955 "thp_collapse_alloc",
956 "thp_collapse_alloc_failed",
957 "thp_split",
958#endif
959
960#endif /* CONFIG_VM_EVENTS_COUNTERS */
961};
962
963static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, 966static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
964 struct zone *zone) 967 struct zone *zone)
965{ 968{
@@ -1198,7 +1201,6 @@ static int __init setup_vmstat(void)
1198#ifdef CONFIG_SMP 1201#ifdef CONFIG_SMP
1199 int cpu; 1202 int cpu;
1200 1203
1201 refresh_zone_stat_thresholds();
1202 register_cpu_notifier(&vmstat_notifier); 1204 register_cpu_notifier(&vmstat_notifier);
1203 1205
1204 for_each_online_cpu(cpu) 1206 for_each_online_cpu(cpu)
diff --git a/net/9p/Kconfig b/net/9p/Kconfig
index 7ed75c7bd5d1..d9ea09b11cf8 100644
--- a/net/9p/Kconfig
+++ b/net/9p/Kconfig
@@ -3,8 +3,8 @@
3# 3#
4 4
5menuconfig NET_9P 5menuconfig NET_9P
6 depends on NET && EXPERIMENTAL 6 depends on NET
7 tristate "Plan 9 Resource Sharing Support (9P2000) (Experimental)" 7 tristate "Plan 9 Resource Sharing Support (9P2000)"
8 help 8 help
9 If you say Y here, you will get experimental support for 9 If you say Y here, you will get experimental support for
10 Plan 9 resource sharing via the 9P2000 protocol. 10 Plan 9 resource sharing via the 9P2000 protocol.
@@ -16,8 +16,8 @@ menuconfig NET_9P
16if NET_9P 16if NET_9P
17 17
18config NET_9P_VIRTIO 18config NET_9P_VIRTIO
19 depends on EXPERIMENTAL && VIRTIO 19 depends on VIRTIO
20 tristate "9P Virtio Transport (Experimental)" 20 tristate "9P Virtio Transport"
21 help 21 help
22 This builds support for a transports between 22 This builds support for a transports between
23 guest partitions and a host partition. 23 guest partitions and a host partition.
diff --git a/net/9p/client.c b/net/9p/client.c
index ceab943dfc49..9e3b0e640da1 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -92,9 +92,6 @@ static int get_protocol_version(const substring_t *name)
92 return version; 92 return version;
93} 93}
94 94
95static struct p9_req_t *
96p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
97
98/** 95/**
99 * parse_options - parse mount options into client structure 96 * parse_options - parse mount options into client structure
100 * @opts: options string passed from mount 97 * @opts: options string passed from mount
@@ -307,12 +304,13 @@ static int p9_tag_init(struct p9_client *c)
307 c->tagpool = p9_idpool_create(); 304 c->tagpool = p9_idpool_create();
308 if (IS_ERR(c->tagpool)) { 305 if (IS_ERR(c->tagpool)) {
309 err = PTR_ERR(c->tagpool); 306 err = PTR_ERR(c->tagpool);
310 c->tagpool = NULL;
311 goto error; 307 goto error;
312 } 308 }
313 309 err = p9_idpool_get(c->tagpool); /* reserve tag 0 */
314 p9_idpool_get(c->tagpool); /* reserve tag 0 */ 310 if (err < 0) {
315 311 p9_idpool_destroy(c->tagpool);
312 goto error;
313 }
316 c->max_tag = 0; 314 c->max_tag = 0;
317error: 315error:
318 return err; 316 return err;
@@ -518,12 +516,15 @@ out_err:
518 return err; 516 return err;
519} 517}
520 518
519static struct p9_req_t *
520p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
521
521/** 522/**
522 * p9_client_flush - flush (cancel) a request 523 * p9_client_flush - flush (cancel) a request
523 * @c: client state 524 * @c: client state
524 * @oldreq: request to cancel 525 * @oldreq: request to cancel
525 * 526 *
526 * This sents a flush for a particular requests and links 527 * This sents a flush for a particular request and links
527 * the flush request to the original request. The current 528 * the flush request to the original request. The current
528 * code only supports a single flush request although the protocol 529 * code only supports a single flush request although the protocol
529 * allows for multiple flush requests to be sent for a single request. 530 * allows for multiple flush requests to be sent for a single request.
@@ -789,11 +790,13 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
789 spin_lock_init(&clnt->lock); 790 spin_lock_init(&clnt->lock);
790 INIT_LIST_HEAD(&clnt->fidlist); 791 INIT_LIST_HEAD(&clnt->fidlist);
791 792
792 p9_tag_init(clnt); 793 err = p9_tag_init(clnt);
794 if (err < 0)
795 goto free_client;
793 796
794 err = parse_opts(options, clnt); 797 err = parse_opts(options, clnt);
795 if (err < 0) 798 if (err < 0)
796 goto free_client; 799 goto destroy_tagpool;
797 800
798 if (!clnt->trans_mod) 801 if (!clnt->trans_mod)
799 clnt->trans_mod = v9fs_get_default_trans(); 802 clnt->trans_mod = v9fs_get_default_trans();
@@ -802,13 +805,12 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
802 err = -EPROTONOSUPPORT; 805 err = -EPROTONOSUPPORT;
803 P9_DPRINTK(P9_DEBUG_ERROR, 806 P9_DPRINTK(P9_DEBUG_ERROR,
804 "No transport defined or default transport\n"); 807 "No transport defined or default transport\n");
805 goto free_client; 808 goto destroy_tagpool;
806 } 809 }
807 810
808 clnt->fidpool = p9_idpool_create(); 811 clnt->fidpool = p9_idpool_create();
809 if (IS_ERR(clnt->fidpool)) { 812 if (IS_ERR(clnt->fidpool)) {
810 err = PTR_ERR(clnt->fidpool); 813 err = PTR_ERR(clnt->fidpool);
811 clnt->fidpool = NULL;
812 goto put_trans; 814 goto put_trans;
813 } 815 }
814 816
@@ -834,6 +836,8 @@ destroy_fidpool:
834 p9_idpool_destroy(clnt->fidpool); 836 p9_idpool_destroy(clnt->fidpool);
835put_trans: 837put_trans:
836 v9fs_put_trans(clnt->trans_mod); 838 v9fs_put_trans(clnt->trans_mod);
839destroy_tagpool:
840 p9_idpool_destroy(clnt->tagpool);
837free_client: 841free_client:
838 kfree(clnt); 842 kfree(clnt);
839 return ERR_PTR(err); 843 return ERR_PTR(err);
@@ -1298,7 +1302,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
1298 if (count < rsize) 1302 if (count < rsize)
1299 rsize = count; 1303 rsize = count;
1300 1304
1301 /* Don't bother zerocopy form small IO (< 1024) */ 1305 /* Don't bother zerocopy for small IO (< 1024) */
1302 if (((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) == 1306 if (((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
1303 P9_TRANS_PREF_PAYLOAD_SEP) && (rsize > 1024)) { 1307 P9_TRANS_PREF_PAYLOAD_SEP) && (rsize > 1024)) {
1304 req = p9_client_rpc(clnt, P9_TREAD, "dqE", fid->fid, offset, 1308 req = p9_client_rpc(clnt, P9_TREAD, "dqE", fid->fid, offset,
diff --git a/net/9p/mod.c b/net/9p/mod.c
index cf8a4128cd5c..72c398275051 100644
--- a/net/9p/mod.c
+++ b/net/9p/mod.c
@@ -139,7 +139,7 @@ void v9fs_put_trans(struct p9_trans_module *m)
139} 139}
140 140
141/** 141/**
142 * v9fs_init - Initialize module 142 * init_p9 - Initialize module
143 * 143 *
144 */ 144 */
145static int __init init_p9(void) 145static int __init init_p9(void)
@@ -154,7 +154,7 @@ static int __init init_p9(void)
154} 154}
155 155
156/** 156/**
157 * v9fs_init - shutdown module 157 * exit_p9 - shutdown module
158 * 158 *
159 */ 159 */
160 160
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 4a9084395d35..fdfdb5747f63 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -916,8 +916,8 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
916 sin_server.sin_family = AF_INET; 916 sin_server.sin_family = AF_INET;
917 sin_server.sin_addr.s_addr = in_aton(addr); 917 sin_server.sin_addr.s_addr = in_aton(addr);
918 sin_server.sin_port = htons(opts.port); 918 sin_server.sin_port = htons(opts.port);
919 err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket); 919 err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_INET,
920 920 SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
921 if (err) { 921 if (err) {
922 P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n"); 922 P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
923 return err; 923 return err;
@@ -954,7 +954,8 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
954 954
955 sun_server.sun_family = PF_UNIX; 955 sun_server.sun_family = PF_UNIX;
956 strcpy(sun_server.sun_path, addr); 956 strcpy(sun_server.sun_path, addr);
957 err = sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket); 957 err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_UNIX,
958 SOCK_STREAM, 0, &csocket, 1);
958 if (err < 0) { 959 if (err < 0) {
959 P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem creating socket\n"); 960 P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem creating socket\n");
960 return err; 961 return err;
diff --git a/net/9p/util.c b/net/9p/util.c
index da6af81e59d9..9c1c9348ac35 100644
--- a/net/9p/util.c
+++ b/net/9p/util.c
@@ -93,7 +93,7 @@ int p9_idpool_get(struct p9_idpool *p)
93 93
94retry: 94retry:
95 if (idr_pre_get(&p->pool, GFP_NOFS) == 0) 95 if (idr_pre_get(&p->pool, GFP_NOFS) == 0)
96 return 0; 96 return -1;
97 97
98 spin_lock_irqsave(&p->lock, flags); 98 spin_lock_irqsave(&p->lock, flags);
99 99
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index e15a82ccc05f..78b55f49de7c 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -76,7 +76,8 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
76 break; 76 break;
77 77
78 default: 78 default:
79 sprintf(s, "(unknown sockaddr family %d)", (int)ss->ss_family); 79 snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %d)",
80 (int)ss->ss_family);
80 } 81 }
81 82
82 return s; 83 return s;
@@ -598,7 +599,7 @@ static void prepare_write_keepalive(struct ceph_connection *con)
598 * Connection negotiation. 599 * Connection negotiation.
599 */ 600 */
600 601
601static void prepare_connect_authorizer(struct ceph_connection *con) 602static int prepare_connect_authorizer(struct ceph_connection *con)
602{ 603{
603 void *auth_buf; 604 void *auth_buf;
604 int auth_len = 0; 605 int auth_len = 0;
@@ -612,13 +613,20 @@ static void prepare_connect_authorizer(struct ceph_connection *con)
612 con->auth_retry); 613 con->auth_retry);
613 mutex_lock(&con->mutex); 614 mutex_lock(&con->mutex);
614 615
616 if (test_bit(CLOSED, &con->state) ||
617 test_bit(OPENING, &con->state))
618 return -EAGAIN;
619
615 con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol); 620 con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
616 con->out_connect.authorizer_len = cpu_to_le32(auth_len); 621 con->out_connect.authorizer_len = cpu_to_le32(auth_len);
617 622
618 con->out_kvec[con->out_kvec_left].iov_base = auth_buf; 623 if (auth_len) {
619 con->out_kvec[con->out_kvec_left].iov_len = auth_len; 624 con->out_kvec[con->out_kvec_left].iov_base = auth_buf;
620 con->out_kvec_left++; 625 con->out_kvec[con->out_kvec_left].iov_len = auth_len;
621 con->out_kvec_bytes += auth_len; 626 con->out_kvec_left++;
627 con->out_kvec_bytes += auth_len;
628 }
629 return 0;
622} 630}
623 631
624/* 632/*
@@ -640,9 +648,9 @@ static void prepare_write_banner(struct ceph_messenger *msgr,
640 set_bit(WRITE_PENDING, &con->state); 648 set_bit(WRITE_PENDING, &con->state);
641} 649}
642 650
643static void prepare_write_connect(struct ceph_messenger *msgr, 651static int prepare_write_connect(struct ceph_messenger *msgr,
644 struct ceph_connection *con, 652 struct ceph_connection *con,
645 int after_banner) 653 int after_banner)
646{ 654{
647 unsigned global_seq = get_global_seq(con->msgr, 0); 655 unsigned global_seq = get_global_seq(con->msgr, 0);
648 int proto; 656 int proto;
@@ -683,7 +691,7 @@ static void prepare_write_connect(struct ceph_messenger *msgr,
683 con->out_more = 0; 691 con->out_more = 0;
684 set_bit(WRITE_PENDING, &con->state); 692 set_bit(WRITE_PENDING, &con->state);
685 693
686 prepare_connect_authorizer(con); 694 return prepare_connect_authorizer(con);
687} 695}
688 696
689 697
@@ -1065,8 +1073,10 @@ static void addr_set_port(struct sockaddr_storage *ss, int p)
1065 switch (ss->ss_family) { 1073 switch (ss->ss_family) {
1066 case AF_INET: 1074 case AF_INET:
1067 ((struct sockaddr_in *)ss)->sin_port = htons(p); 1075 ((struct sockaddr_in *)ss)->sin_port = htons(p);
1076 break;
1068 case AF_INET6: 1077 case AF_INET6:
1069 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p); 1078 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
1079 break;
1070 } 1080 }
1071} 1081}
1072 1082
@@ -1216,6 +1226,7 @@ static int process_connect(struct ceph_connection *con)
1216 u64 sup_feat = con->msgr->supported_features; 1226 u64 sup_feat = con->msgr->supported_features;
1217 u64 req_feat = con->msgr->required_features; 1227 u64 req_feat = con->msgr->required_features;
1218 u64 server_feat = le64_to_cpu(con->in_reply.features); 1228 u64 server_feat = le64_to_cpu(con->in_reply.features);
1229 int ret;
1219 1230
1220 dout("process_connect on %p tag %d\n", con, (int)con->in_tag); 1231 dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
1221 1232
@@ -1250,7 +1261,9 @@ static int process_connect(struct ceph_connection *con)
1250 return -1; 1261 return -1;
1251 } 1262 }
1252 con->auth_retry = 1; 1263 con->auth_retry = 1;
1253 prepare_write_connect(con->msgr, con, 0); 1264 ret = prepare_write_connect(con->msgr, con, 0);
1265 if (ret < 0)
1266 return ret;
1254 prepare_read_connect(con); 1267 prepare_read_connect(con);
1255 break; 1268 break;
1256 1269
@@ -1277,6 +1290,9 @@ static int process_connect(struct ceph_connection *con)
1277 if (con->ops->peer_reset) 1290 if (con->ops->peer_reset)
1278 con->ops->peer_reset(con); 1291 con->ops->peer_reset(con);
1279 mutex_lock(&con->mutex); 1292 mutex_lock(&con->mutex);
1293 if (test_bit(CLOSED, &con->state) ||
1294 test_bit(OPENING, &con->state))
1295 return -EAGAIN;
1280 break; 1296 break;
1281 1297
1282 case CEPH_MSGR_TAG_RETRY_SESSION: 1298 case CEPH_MSGR_TAG_RETRY_SESSION:
@@ -1341,7 +1357,9 @@ static int process_connect(struct ceph_connection *con)
1341 * to WAIT. This shouldn't happen if we are the 1357 * to WAIT. This shouldn't happen if we are the
1342 * client. 1358 * client.
1343 */ 1359 */
1344 pr_err("process_connect peer connecting WAIT\n"); 1360 pr_err("process_connect got WAIT as client\n");
1361 con->error_msg = "protocol error, got WAIT as client";
1362 return -1;
1345 1363
1346 default: 1364 default:
1347 pr_err("connect protocol error, will retry\n"); 1365 pr_err("connect protocol error, will retry\n");
@@ -1810,6 +1828,17 @@ static int try_read(struct ceph_connection *con)
1810more: 1828more:
1811 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag, 1829 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
1812 con->in_base_pos); 1830 con->in_base_pos);
1831
1832 /*
1833 * process_connect and process_message drop and re-take
1834 * con->mutex. make sure we handle a racing close or reopen.
1835 */
1836 if (test_bit(CLOSED, &con->state) ||
1837 test_bit(OPENING, &con->state)) {
1838 ret = -EAGAIN;
1839 goto out;
1840 }
1841
1813 if (test_bit(CONNECTING, &con->state)) { 1842 if (test_bit(CONNECTING, &con->state)) {
1814 if (!test_bit(NEGOTIATING, &con->state)) { 1843 if (!test_bit(NEGOTIATING, &con->state)) {
1815 dout("try_read connecting\n"); 1844 dout("try_read connecting\n");
@@ -1938,8 +1967,10 @@ static void con_work(struct work_struct *work)
1938{ 1967{
1939 struct ceph_connection *con = container_of(work, struct ceph_connection, 1968 struct ceph_connection *con = container_of(work, struct ceph_connection,
1940 work.work); 1969 work.work);
1970 int ret;
1941 1971
1942 mutex_lock(&con->mutex); 1972 mutex_lock(&con->mutex);
1973restart:
1943 if (test_and_clear_bit(BACKOFF, &con->state)) { 1974 if (test_and_clear_bit(BACKOFF, &con->state)) {
1944 dout("con_work %p backing off\n", con); 1975 dout("con_work %p backing off\n", con);
1945 if (queue_delayed_work(ceph_msgr_wq, &con->work, 1976 if (queue_delayed_work(ceph_msgr_wq, &con->work,
@@ -1969,18 +2000,31 @@ static void con_work(struct work_struct *work)
1969 con_close_socket(con); 2000 con_close_socket(con);
1970 } 2001 }
1971 2002
1972 if (test_and_clear_bit(SOCK_CLOSED, &con->state) || 2003 if (test_and_clear_bit(SOCK_CLOSED, &con->state))
1973 try_read(con) < 0 || 2004 goto fault;
1974 try_write(con) < 0) { 2005
1975 mutex_unlock(&con->mutex); 2006 ret = try_read(con);
1976 ceph_fault(con); /* error/fault path */ 2007 if (ret == -EAGAIN)
1977 goto done_unlocked; 2008 goto restart;
1978 } 2009 if (ret < 0)
2010 goto fault;
2011
2012 ret = try_write(con);
2013 if (ret == -EAGAIN)
2014 goto restart;
2015 if (ret < 0)
2016 goto fault;
1979 2017
1980done: 2018done:
1981 mutex_unlock(&con->mutex); 2019 mutex_unlock(&con->mutex);
1982done_unlocked: 2020done_unlocked:
1983 con->ops->put(con); 2021 con->ops->put(con);
2022 return;
2023
2024fault:
2025 mutex_unlock(&con->mutex);
2026 ceph_fault(con); /* error/fault path */
2027 goto done_unlocked;
1984} 2028}
1985 2029
1986 2030
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 6b5dda1cb5df..6ea2b892f44b 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -124,7 +124,7 @@ static void calc_layout(struct ceph_osd_client *osdc,
124 ceph_calc_raw_layout(osdc, layout, vino.snap, off, 124 ceph_calc_raw_layout(osdc, layout, vino.snap, off,
125 plen, &bno, req, op); 125 plen, &bno, req, op);
126 126
127 sprintf(req->r_oid, "%llx.%08llx", vino.ino, bno); 127 snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno);
128 req->r_oid_len = strlen(req->r_oid); 128 req->r_oid_len = strlen(req->r_oid);
129} 129}
130 130
@@ -1421,6 +1421,15 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1421done: 1421done:
1422 downgrade_write(&osdc->map_sem); 1422 downgrade_write(&osdc->map_sem);
1423 ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch); 1423 ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
1424
1425 /*
1426 * subscribe to subsequent osdmap updates if full to ensure
1427 * we find out when we are no longer full and stop returning
1428 * ENOSPC.
1429 */
1430 if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
1431 ceph_monc_request_next_osdmap(&osdc->client->monc);
1432
1424 send_queued(osdc); 1433 send_queued(osdc);
1425 up_read(&osdc->map_sem); 1434 up_read(&osdc->map_sem);
1426 wake_up_all(&osdc->client->auth_wq); 1435 wake_up_all(&osdc->client->auth_wq);
@@ -1677,8 +1686,14 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
1677 */ 1686 */
1678 if (req->r_sent == 0) { 1687 if (req->r_sent == 0) {
1679 rc = __map_request(osdc, req); 1688 rc = __map_request(osdc, req);
1680 if (rc < 0) 1689 if (rc < 0) {
1690 if (nofail) {
1691 dout("osdc_start_request failed map, "
1692 " will retry %lld\n", req->r_tid);
1693 rc = 0;
1694 }
1681 goto out_unlock; 1695 goto out_unlock;
1696 }
1682 if (req->r_osd == NULL) { 1697 if (req->r_osd == NULL) {
1683 dout("send_request %p no up osds in pg\n", req); 1698 dout("send_request %p no up osds in pg\n", req);
1684 ceph_monc_request_next_osdmap(&osdc->client->monc); 1699 ceph_monc_request_next_osdmap(&osdc->client->monc);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 71603ac3dff5..e97c3588c3ec 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -765,7 +765,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
765 } 765 }
766 766
767 map->epoch++; 767 map->epoch++;
768 map->modified = map->modified; 768 map->modified = modified;
769 if (newcrush) { 769 if (newcrush) {
770 if (map->crush) 770 if (map->crush)
771 crush_destroy(map->crush); 771 crush_destroy(map->crush);
@@ -830,15 +830,20 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
830 map->osd_addr[osd] = addr; 830 map->osd_addr[osd] = addr;
831 } 831 }
832 832
833 /* new_down */ 833 /* new_state */
834 ceph_decode_32_safe(p, end, len, bad); 834 ceph_decode_32_safe(p, end, len, bad);
835 while (len--) { 835 while (len--) {
836 u32 osd; 836 u32 osd;
837 u8 xorstate;
837 ceph_decode_32_safe(p, end, osd, bad); 838 ceph_decode_32_safe(p, end, osd, bad);
839 xorstate = **(u8 **)p;
838 (*p)++; /* clean flag */ 840 (*p)++; /* clean flag */
839 pr_info("osd%d down\n", osd); 841 if (xorstate == 0)
842 xorstate = CEPH_OSD_UP;
843 if (xorstate & CEPH_OSD_UP)
844 pr_info("osd%d down\n", osd);
840 if (osd < map->max_osd) 845 if (osd < map->max_osd)
841 map->osd_state[osd] &= ~CEPH_OSD_UP; 846 map->osd_state[osd] ^= xorstate;
842 } 847 }
843 848
844 /* new_weight */ 849 /* new_weight */
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 67e31276682a..cd6e4aa19dbf 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -326,10 +326,12 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
326 * Run memory cache shrinker. 326 * Run memory cache shrinker.
327 */ 327 */
328static int 328static int
329rpcauth_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) 329rpcauth_cache_shrinker(struct shrinker *shrink, struct shrink_control *sc)
330{ 330{
331 LIST_HEAD(free); 331 LIST_HEAD(free);
332 int res; 332 int res;
333 int nr_to_scan = sc->nr_to_scan;
334 gfp_t gfp_mask = sc->gfp_mask;
333 335
334 if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL) 336 if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
335 return (nr_to_scan == 0) ? 0 : -1; 337 return (nr_to_scan == 0) ? 0 : -1;
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index d8670810db65..8657f99bfb2b 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -210,10 +210,10 @@ our $typeTypedefs = qr{(?x:
210 210
211our $logFunctions = qr{(?x: 211our $logFunctions = qr{(?x:
212 printk| 212 printk|
213 pr_(debug|dbg|vdbg|devel|info|warning|err|notice|alert|crit|emerg|cont)| 213 [a-z]+_(emerg|alert|crit|err|warning|warn|notice|info|debug|dbg|vdbg|devel|cont|WARN)|
214 (dev|netdev|netif)_(printk|dbg|vdbg|info|warn|err|notice|alert|crit|emerg|WARN)|
215 WARN| 214 WARN|
216 panic 215 panic|
216 MODULE_[A-Z_]+
217)}; 217)};
218 218
219our @typeList = ( 219our @typeList = (
@@ -1462,7 +1462,7 @@ sub process {
1462#80 column limit 1462#80 column limit
1463 if ($line =~ /^\+/ && $prevrawline !~ /\/\*\*/ && 1463 if ($line =~ /^\+/ && $prevrawline !~ /\/\*\*/ &&
1464 $rawline !~ /^.\s*\*\s*\@$Ident\s/ && 1464 $rawline !~ /^.\s*\*\s*\@$Ident\s/ &&
1465 !($line =~ /^\+\s*$logFunctions\s*\(\s*(?:(KERN_\S+\s*|[^"]*))?"[X\t]*"\s*(?:,|\)\s*;)\s*$/ || 1465 !($line =~ /^\+\s*$logFunctions\s*\(\s*(?:(KERN_\S+\s*|[^"]*))?"[X\t]*"\s*(?:|,|\)\s*;)\s*$/ ||
1466 $line =~ /^\+\s*"[^"]*"\s*(?:\s*|,|\)\s*;)\s*$/) && 1466 $line =~ /^\+\s*"[^"]*"\s*(?:\s*|,|\)\s*;)\s*$/) &&
1467 $length > 80) 1467 $length > 80)
1468 { 1468 {
@@ -2748,6 +2748,11 @@ sub process {
2748 WARN("sizeof(& should be avoided\n" . $herecurr); 2748 WARN("sizeof(& should be avoided\n" . $herecurr);
2749 } 2749 }
2750 2750
2751# check for line continuations in quoted strings with odd counts of "
2752 if ($rawline =~ /\\$/ && $rawline =~ tr/"/"/ % 2) {
2753 WARN("Avoid line continuations in quoted strings\n" . $herecurr);
2754 }
2755
2751# check for new externs in .c files. 2756# check for new externs in .c files.
2752 if ($realfile =~ /\.c$/ && defined $stat && 2757 if ($realfile =~ /\.c$/ && defined $stat &&
2753 $stat =~ /^.\s*(?:extern\s+)?$Type\s+($Ident)(\s*)\(/s) 2758 $stat =~ /^.\s*(?:extern\s+)?$Type\s+($Ident)(\s*)\(/s)
diff --git a/scripts/checkversion.pl b/scripts/checkversion.pl
index b444e89a0095..5e490a8ceca5 100755
--- a/scripts/checkversion.pl
+++ b/scripts/checkversion.pl
@@ -12,6 +12,7 @@ $| = 1;
12my $debugging; 12my $debugging;
13 13
14foreach my $file (@ARGV) { 14foreach my $file (@ARGV) {
15 next if $file =~ "include/linux/version\.h";
15 # Open this file. 16 # Open this file.
16 open( my $f, '<', $file ) 17 open( my $f, '<', $file )
17 or die "Can't open $file: $!\n"; 18 or die "Can't open $file: $!\n";
diff --git a/scripts/export_report.pl b/scripts/export_report.pl
index 04dce7c15f83..8f79b701de87 100644
--- a/scripts/export_report.pl
+++ b/scripts/export_report.pl
@@ -25,11 +25,12 @@ sub alphabetically {
25sub print_depends_on { 25sub print_depends_on {
26 my ($href) = @_; 26 my ($href) = @_;
27 print "\n"; 27 print "\n";
28 while (my ($mod, $list) = each %$href) { 28 for my $mod (sort keys %$href) {
29 my $list = $href->{$mod};
29 print "\t$mod:\n"; 30 print "\t$mod:\n";
30 foreach my $sym (sort numerically @{$list}) { 31 foreach my $sym (sort numerically @{$list}) {
31 my ($symbol, $no) = split /\s+/, $sym; 32 my ($symbol, $no) = split /\s+/, $sym;
32 printf("\t\t%-25s\t%-25d\n", $symbol, $no); 33 printf("\t\t%-25s\n", $symbol);
33 } 34 }
34 print "\n"; 35 print "\n";
35 } 36 }
@@ -49,8 +50,14 @@ sub usage {
49} 50}
50 51
51sub collectcfiles { 52sub collectcfiles {
52 my @file 53 my @file;
53 = `cat .tmp_versions/*.mod | grep '.*\.ko\$' | sed s/\.ko$/.mod.c/`; 54 while (<.tmp_versions/*.mod>) {
55 open my $fh, '<', $_ or die "cannot open $_: $!\n";
56 push (@file,
57 grep s/\.ko/.mod.c/, # change the suffix
58 grep m/.+\.ko/, # find the .ko path
59 <$fh>); # lines in opened file
60 }
54 chomp @file; 61 chomp @file;
55 return @file; 62 return @file;
56} 63}
@@ -95,6 +102,8 @@ close($module_symvers);
95# 102#
96# collect the usage count of each symbol. 103# collect the usage count of each symbol.
97# 104#
105my $modversion_warnings = 0;
106
98foreach my $thismod (@allcfiles) { 107foreach my $thismod (@allcfiles) {
99 my $module; 108 my $module;
100 109
@@ -125,7 +134,8 @@ foreach my $thismod (@allcfiles) {
125 } 134 }
126 } 135 }
127 if ($state != 2) { 136 if ($state != 2) {
128 print "WARNING:$thismod is not built with CONFIG_MODVERSION enabled\n"; 137 warn "WARNING:$thismod is not built with CONFIG_MODVERSIONS enabled\n";
138 $modversion_warnings++;
129 } 139 }
130 close($module); 140 close($module);
131} 141}
@@ -159,8 +169,12 @@ printf("SECTION 2:\n\tThis section reports export-symbol-usage of in-kernel
159modules. Each module lists the modules, and the symbols from that module that 169modules. Each module lists the modules, and the symbols from that module that
160it uses. Each listed symbol reports the number of modules using it\n"); 170it uses. Each listed symbol reports the number of modules using it\n");
161 171
172print "\nNOTE: Got $modversion_warnings CONFIG_MODVERSIONS warnings\n\n"
173 if $modversion_warnings;
174
162print "~"x80 , "\n"; 175print "~"x80 , "\n";
163while (my ($thismod, $list) = each %MODULE) { 176for my $thismod (sort keys %MODULE) {
177 my $list = $MODULE{$thismod};
164 my %depends; 178 my %depends;
165 $thismod =~ s/\.mod\.c/.ko/; 179 $thismod =~ s/\.mod\.c/.ko/;
166 print "\t\t\t$thismod\n"; 180 print "\t\t\t$thismod\n";
diff --git a/scripts/package/Makefile b/scripts/package/Makefile
index a834b935f536..006960ebbce9 100644
--- a/scripts/package/Makefile
+++ b/scripts/package/Makefile
@@ -26,9 +26,9 @@ RPM := $(shell if [ -x "/usr/bin/rpmbuild" ]; then echo rpmbuild; \
26 else echo rpm; fi) 26 else echo rpm; fi)
27 27
28# Remove hyphens since they have special meaning in RPM filenames 28# Remove hyphens since they have special meaning in RPM filenames
29KERNELPATH := kernel-$(subst -,,$(KERNELRELEASE)) 29KERNELPATH := kernel-$(subst -,_,$(KERNELRELEASE))
30MKSPEC := $(srctree)/scripts/package/mkspec 30MKSPEC := $(srctree)/scripts/package/mkspec
31PREV := set -e; cd ..; 31PREV := set -e; cd -P ..;
32 32
33# rpm-pkg 33# rpm-pkg
34# --------------------------------------------------------------------------- 34# ---------------------------------------------------------------------------
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index e1c1d5b8ca70..4bf17ddf7c7f 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -22,7 +22,7 @@ if [ "`grep CONFIG_DRM=y .config | cut -f2 -d\=`" = "y" ]; then
22fi 22fi
23 23
24PROVIDES="$PROVIDES kernel-$KERNELRELEASE" 24PROVIDES="$PROVIDES kernel-$KERNELRELEASE"
25__KERNELRELEASE=`echo $KERNELRELEASE | sed -e "s/-//g"` 25__KERNELRELEASE=`echo $KERNELRELEASE | sed -e "s/-/_/g"`
26 26
27echo "Name: kernel" 27echo "Name: kernel"
28echo "Summary: The Linux Kernel" 28echo "Summary: The Linux Kernel"
@@ -47,6 +47,18 @@ echo ""
47echo "%description" 47echo "%description"
48echo "The Linux Kernel, the operating system core itself" 48echo "The Linux Kernel, the operating system core itself"
49echo "" 49echo ""
50echo "%package headers"
51echo "Summary: Header files for the Linux kernel for use by glibc"
52echo "Group: Development/System"
53echo "Obsoletes: kernel-headers"
54echo "Provides: kernel-headers = %{version}"
55echo "%description headers"
56echo "Kernel-headers includes the C header files that specify the interface"
57echo "between the Linux kernel and userspace libraries and programs. The"
58echo "header files define structures and constants that are needed for"
59echo "building most standard programs and are also needed for rebuilding the"
60echo "glibc package."
61echo ""
50 62
51if ! $PREBUILT; then 63if ! $PREBUILT; then
52echo "%prep" 64echo "%prep"
@@ -83,6 +95,7 @@ echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/vmlinuz-$KERNELRELEASE"
83echo "%endif" 95echo "%endif"
84echo "%endif" 96echo "%endif"
85 97
98echo 'make %{?_smp_mflags} INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr headers_install'
86echo 'cp System.map $RPM_BUILD_ROOT'"/boot/System.map-$KERNELRELEASE" 99echo 'cp System.map $RPM_BUILD_ROOT'"/boot/System.map-$KERNELRELEASE"
87 100
88echo 'cp .config $RPM_BUILD_ROOT'"/boot/config-$KERNELRELEASE" 101echo 'cp .config $RPM_BUILD_ROOT'"/boot/config-$KERNELRELEASE"
@@ -105,3 +118,7 @@ echo "/lib/modules/$KERNELRELEASE"
105echo "/lib/firmware" 118echo "/lib/firmware"
106echo "/boot/*" 119echo "/boot/*"
107echo "" 120echo ""
121echo "%files headers"
122echo '%defattr (-, root, root)'
123echo "/usr/include"
124echo ""
diff --git a/scripts/patch-kernel b/scripts/patch-kernel
index 46a59cae3a0a..20fb25c23382 100755
--- a/scripts/patch-kernel
+++ b/scripts/patch-kernel
@@ -250,7 +250,7 @@ while : # incrementing SUBLEVEL (s in v.p.s)
250do 250do
251 CURRENTFULLVERSION="$VERSION.$PATCHLEVEL.$SUBLEVEL" 251 CURRENTFULLVERSION="$VERSION.$PATCHLEVEL.$SUBLEVEL"
252 EXTRAVER= 252 EXTRAVER=
253 if [ $STOPFULLVERSION = $CURRENTFULLVERSION ]; then 253 if [ x$STOPFULLVERSION = x$CURRENTFULLVERSION ]; then
254 echo "Stopping at $CURRENTFULLVERSION base as requested." 254 echo "Stopping at $CURRENTFULLVERSION base as requested."
255 break 255 break
256 fi 256 fi